From 8ca558920347733ddf7a924463c93620e976a3f3 Mon Sep 17 00:00:00 2001 From: Sriharsha Chintalapani Date: Tue, 28 Apr 2015 00:29:53 -0700 Subject: [PATCH 01/30] KAFKA-1690. new java producer needs ssl support as a client. --- checkstyle/checkstyle.xml | 1 - checkstyle/import-control.xml | 14 +- .../java/org/apache/kafka/clients/ClientUtils.java | 14 + .../apache/kafka/clients/CommonClientConfigs.java | 2 + .../kafka/clients/consumer/ConsumerConfig.java | 11 +- .../kafka/clients/consumer/KafkaConsumer.java | 5 +- .../kafka/clients/producer/KafkaProducer.java | 5 +- .../kafka/clients/producer/ProducerConfig.java | 8 +- .../apache/kafka/common/config/SecurityConfig.java | 113 ++++++ .../apache/kafka/common/network/Authenticator.java | 60 +++ .../org/apache/kafka/common/network/Channel.java | 133 +++++++ .../kafka/common/network/DefaultAuthenticator.java | 46 +++ .../common/network/PlainTextTransportLayer.java | 148 +++++++ .../apache/kafka/common/network/SSLFactory.java | 188 +++++++++ .../kafka/common/network/SSLTransportLayer.java | 430 +++++++++++++++++++++ .../apache/kafka/common/network/Selectable.java | 1 + .../org/apache/kafka/common/network/Selector.java | 129 +++++-- .../kafka/common/network/TransportLayer.java | 86 +++++ .../kafka/common/protocol/SecurityProtocol.java | 2 + .../java/org/apache/kafka/common/utils/Utils.java | 11 + .../apache/kafka/common/network/EchoServer.java | 108 ++++++ .../kafka/common/network/SSLSelectorTest.java | 97 +++++ .../apache/kafka/common/network/SelectorTest.java | 79 +--- .../org/apache/kafka/common/utils/UtilsTest.java | 2 + .../java/org/apache/kafka/test/TestSSLUtils.java | 208 ++++++++++ 25 files changed, 1780 insertions(+), 121 deletions(-) create mode 100644 clients/src/main/java/org/apache/kafka/common/config/SecurityConfig.java create mode 100644 clients/src/main/java/org/apache/kafka/common/network/Authenticator.java create mode 100644 clients/src/main/java/org/apache/kafka/common/network/Channel.java create mode 100644 clients/src/main/java/org/apache/kafka/common/network/DefaultAuthenticator.java create mode 100644 clients/src/main/java/org/apache/kafka/common/network/PlainTextTransportLayer.java create mode 100644 clients/src/main/java/org/apache/kafka/common/network/SSLFactory.java create mode 100644 clients/src/main/java/org/apache/kafka/common/network/SSLTransportLayer.java create mode 100644 clients/src/main/java/org/apache/kafka/common/network/TransportLayer.java create mode 100644 clients/src/test/java/org/apache/kafka/common/network/EchoServer.java create mode 100644 clients/src/test/java/org/apache/kafka/common/network/SSLSelectorTest.java create mode 100644 clients/src/test/java/org/apache/kafka/test/TestSSLUtils.java diff --git a/checkstyle/checkstyle.xml b/checkstyle/checkstyle.xml index a215ff3..5fbf562 100644 --- a/checkstyle/checkstyle.xml +++ b/checkstyle/checkstyle.xml @@ -33,7 +33,6 @@ - diff --git a/checkstyle/import-control.xml b/checkstyle/import-control.xml index f2e6cec..e649189 100644 --- a/checkstyle/import-control.xml +++ b/checkstyle/import-control.xml @@ -26,6 +26,9 @@ + + + @@ -33,14 +36,18 @@ - + + + + + @@ -51,6 +58,9 @@ + + + @@ -73,6 +83,7 @@ + @@ -80,6 +91,7 @@ + diff --git a/clients/src/main/java/org/apache/kafka/clients/ClientUtils.java b/clients/src/main/java/org/apache/kafka/clients/ClientUtils.java index 0d68bf1..54a554f 100644 --- a/clients/src/main/java/org/apache/kafka/clients/ClientUtils.java +++ b/clients/src/main/java/org/apache/kafka/clients/ClientUtils.java @@ -13,17 +13,22 @@ package 
org.apache.kafka.clients; import java.io.Closeable; +import java.io.IOException; import java.net.InetSocketAddress; import java.util.ArrayList; import java.util.List; +import java.util.Properties; import java.util.concurrent.atomic.AtomicReference; import org.apache.kafka.common.config.ConfigException; +import org.apache.kafka.common.config.SecurityConfig; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import static org.apache.kafka.common.utils.Utils.getHost; import static org.apache.kafka.common.utils.Utils.getPort; +import static org.apache.kafka.common.utils.Utils.loadProps; + public class ClientUtils { private static final Logger log = LoggerFactory.getLogger(ClientUtils.class); @@ -61,4 +66,13 @@ public class ClientUtils { } } } + + public static SecurityConfig parseSecurityConfig(String securityConfigFile) throws IOException { + Properties securityProps = new Properties(); + if (securityConfigFile == null || securityConfigFile == "") { + return new SecurityConfig(securityProps); + } + securityProps = loadProps(securityConfigFile); + return new SecurityConfig(securityProps); + } } \ No newline at end of file diff --git a/clients/src/main/java/org/apache/kafka/clients/CommonClientConfigs.java b/clients/src/main/java/org/apache/kafka/clients/CommonClientConfigs.java index cf32e4e..0b23875 100644 --- a/clients/src/main/java/org/apache/kafka/clients/CommonClientConfigs.java +++ b/clients/src/main/java/org/apache/kafka/clients/CommonClientConfigs.java @@ -55,4 +55,6 @@ public class CommonClientConfigs { public static final String METRIC_REPORTER_CLASSES_CONFIG = "metric.reporters"; public static final String METRIC_REPORTER_CLASSES_DOC = "A list of classes to use as metrics reporters. Implementing the MetricReporter interface allows plugging in classes that will be notified of new metric creation. 
The JmxReporter is always included to register JMX statistics."; + public static final String SECURITY_CONFIG_FILE_CONFIG = "security.config.file"; + public static final String SECURITY_CONFIG_FILE_DOC = "Kafka client security related config file."; } \ No newline at end of file diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/ConsumerConfig.java b/clients/src/main/java/org/apache/kafka/clients/consumer/ConsumerConfig.java index bdff518..190fe63 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/ConsumerConfig.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/ConsumerConfig.java @@ -156,6 +156,10 @@ public class ConsumerConfig extends AbstractConfig { public static final String VALUE_DESERIALIZER_CLASS_CONFIG = "value.deserializer"; private static final String VALUE_DESERIALIZER_CLASS_DOC = "Deserializer class for value that implements the Deserializer interface."; + /** security.config.file */ + public static final String SECURITY_CONFIG_FILE_CONFIG = CommonClientConfigs.SECURITY_CONFIG_FILE_CONFIG; + private static final String SECURITY_CONFIG_FILE_DOC = CommonClientConfigs.SECURITY_CONFIG_FILE_DOC; + static { CONFIG = new ConfigDef().define(BOOTSTRAP_SERVERS_CONFIG, @@ -277,7 +281,12 @@ public class ConsumerConfig extends AbstractConfig { .define(VALUE_DESERIALIZER_CLASS_CONFIG, Type.CLASS, Importance.HIGH, - VALUE_DESERIALIZER_CLASS_DOC); + VALUE_DESERIALIZER_CLASS_DOC) + .define(SECURITY_CONFIG_FILE_CONFIG, + Type.STRING, + "", + Importance.MEDIUM, + SECURITY_CONFIG_FILE_DOC); } public static Map addDeserializerToConfig(Map configs, diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/KafkaConsumer.java b/clients/src/main/java/org/apache/kafka/clients/consumer/KafkaConsumer.java index d301be4..032fd4b 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/KafkaConsumer.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/KafkaConsumer.java @@ -37,6 +37,7 @@ import org.apache.kafka.common.KafkaException; import org.apache.kafka.common.Metric; import org.apache.kafka.common.PartitionInfo; import org.apache.kafka.common.TopicPartition; +import org.apache.kafka.common.config.SecurityConfig; import org.apache.kafka.common.metrics.JmxReporter; import org.apache.kafka.common.metrics.MetricConfig; import org.apache.kafka.common.MetricName; @@ -361,6 +362,7 @@ public class KafkaConsumer implements Consumer { private final boolean autoCommit; private final long autoCommitIntervalMs; private final ConsumerRebalanceCallback rebalanceCallback; + private final SecurityConfig securityConfig; private long lastCommitAttemptMs; private boolean closed = false; @@ -472,7 +474,8 @@ public class KafkaConsumer implements Consumer { String metricGrpPrefix = "consumer"; Map metricsTags = new LinkedHashMap(); metricsTags.put("client-id", clientId); - this.client = new NetworkClient(new Selector(metrics, time, metricGrpPrefix, metricsTags), + this.securityConfig = ClientUtils.parseSecurityConfig(config.getString(ConsumerConfig.SECURITY_CONFIG_FILE_CONFIG)); + this.client = new NetworkClient(new Selector(metrics, time, metricGrpPrefix, metricsTags, securityConfig), this.metadata, clientId, 100, // a fixed large enough value will suffice diff --git a/clients/src/main/java/org/apache/kafka/clients/producer/KafkaProducer.java b/clients/src/main/java/org/apache/kafka/clients/producer/KafkaProducer.java index 8e336a3..2c21dee 100644 --- a/clients/src/main/java/org/apache/kafka/clients/producer/KafkaProducer.java +++ 
b/clients/src/main/java/org/apache/kafka/clients/producer/KafkaProducer.java @@ -32,6 +32,7 @@ import org.apache.kafka.common.Metric; import org.apache.kafka.common.PartitionInfo; import org.apache.kafka.common.TopicPartition; import org.apache.kafka.common.config.ConfigException; +import org.apache.kafka.common.config.SecurityConfig; import org.apache.kafka.common.errors.ApiException; import org.apache.kafka.common.errors.InterruptException; import org.apache.kafka.common.errors.RecordTooLargeException; @@ -138,6 +139,7 @@ public class KafkaProducer implements Producer { private final Serializer keySerializer; private final Serializer valueSerializer; private final ProducerConfig producerConfig; + private final SecurityConfig securityConfig; /** * A producer is instantiated by providing a set of key-value pairs as configuration. Valid configuration strings @@ -214,6 +216,7 @@ public class KafkaProducer implements Producer { this.maxRequestSize = config.getInt(ProducerConfig.MAX_REQUEST_SIZE_CONFIG); this.totalMemorySize = config.getLong(ProducerConfig.BUFFER_MEMORY_CONFIG); this.compressionType = CompressionType.forName(config.getString(ProducerConfig.COMPRESSION_TYPE_CONFIG)); + this.securityConfig = ClientUtils.parseSecurityConfig(config.getString(ProducerConfig.SECURITY_CONFIG_FILE_CONFIG)); Map metricTags = new LinkedHashMap(); metricTags.put("client-id", clientId); this.accumulator = new RecordAccumulator(config.getInt(ProducerConfig.BATCH_SIZE_CONFIG), @@ -228,7 +231,7 @@ public class KafkaProducer implements Producer { List addresses = ClientUtils.parseAndValidateAddresses(config.getList(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG)); this.metadata.update(Cluster.bootstrap(addresses), time.milliseconds()); - NetworkClient client = new NetworkClient(new Selector(this.metrics, time, "producer", metricTags), + NetworkClient client = new NetworkClient(new Selector(this.metrics, time, "producer", metricTags, securityConfig), this.metadata, clientId, config.getInt(ProducerConfig.MAX_IN_FLIGHT_REQUESTS_PER_CONNECTION), diff --git a/clients/src/main/java/org/apache/kafka/clients/producer/ProducerConfig.java b/clients/src/main/java/org/apache/kafka/clients/producer/ProducerConfig.java index 187d000..83506e7 100644 --- a/clients/src/main/java/org/apache/kafka/clients/producer/ProducerConfig.java +++ b/clients/src/main/java/org/apache/kafka/clients/producer/ProducerConfig.java @@ -169,6 +169,11 @@ public class ProducerConfig extends AbstractConfig { public static final String VALUE_SERIALIZER_CLASS_CONFIG = "value.serializer"; private static final String VALUE_SERIALIZER_CLASS_DOC = "Serializer class for value that implements the Serializer interface."; + /** security.config.file */ + public static final String SECURITY_CONFIG_FILE_CONFIG = CommonClientConfigs.SECURITY_CONFIG_FILE_CONFIG; + private static final String SECURITY_CONFIG_FILE_DOC = CommonClientConfigs.SECURITY_CONFIG_FILE_DOC; + + static { CONFIG = new ConfigDef().define(BOOTSTRAP_SERVERS_CONFIG, Type.LIST, Importance.HIGH, CommonClientConfigs.BOOSTRAP_SERVERS_DOC) .define(BUFFER_MEMORY_CONFIG, Type.LONG, 32 * 1024 * 1024L, atLeast(0L), Importance.HIGH, BUFFER_MEMORY_DOC) @@ -217,7 +222,8 @@ public class ProducerConfig extends AbstractConfig { Importance.LOW, MAX_IN_FLIGHT_REQUESTS_PER_CONNECTION_DOC) .define(KEY_SERIALIZER_CLASS_CONFIG, Type.CLASS, Importance.HIGH, KEY_SERIALIZER_CLASS_DOC) - .define(VALUE_SERIALIZER_CLASS_CONFIG, Type.CLASS, Importance.HIGH, VALUE_SERIALIZER_CLASS_DOC); + .define(VALUE_SERIALIZER_CLASS_CONFIG, 
Type.CLASS, Importance.HIGH, VALUE_SERIALIZER_CLASS_DOC) + .define(SECURITY_CONFIG_FILE_CONFIG, Type.STRING, "", Importance.MEDIUM, SECURITY_CONFIG_FILE_DOC); } public static Map addSerializerToConfig(Map configs, diff --git a/clients/src/main/java/org/apache/kafka/common/config/SecurityConfig.java b/clients/src/main/java/org/apache/kafka/common/config/SecurityConfig.java new file mode 100644 index 0000000..7954a7e --- /dev/null +++ b/clients/src/main/java/org/apache/kafka/common/config/SecurityConfig.java @@ -0,0 +1,113 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE + * file distributed with this work for additional information regarding copyright ownership. The ASF licenses this file + * to You under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the + * License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on + * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + */ + +package org.apache.kafka.common.config; + + +import java.util.Map; + +import org.apache.kafka.common.config.ConfigDef.Importance; +import org.apache.kafka.common.config.ConfigDef.Type; +import org.apache.kafka.common.protocol.SecurityProtocol; + + +/** + * Security Related config for clients and server. + */ + +public class SecurityConfig extends AbstractConfig { + /* + * NOTE: DO NOT CHANGE EITHER CONFIG STRINGS OR THEIR JAVA VARIABLE NAMES AS THESE ARE PART OF THE PUBLIC API AND + * CHANGE WILL BREAK USER CODE. + */ + + private static final ConfigDef CONFIG; + + public static final String SECURITY_PROTOCOL_CONFIG = "security.protocol"; + public static final String SECURITY_PROTOCOL_DOC = "Protocol used to communicate with brokers. Currently only PLAINTEXT and SSL are supported."; + + public static final String SSL_PROTOCOL_CONFIG = "ssl.protocol"; + public static final String SSL_PROTOCOL_DOC = "The TLS protocol used for broker connections if security protocol is SSL. " + + "Any version of TLS is accepted by default."; + + public static final String SSL_CIPHER_SUITES_CONFIG = "ssl.cipher.suites"; + public static final String SSL_CIPHER_SUITES_DOC = "The list of cipher suites enabled for SSL connections. " + + "Default value is the list of cipher suites enabled for the Java Virtual Machine."; + + public static final String SSL_ENABLED_PROTOCOLS_CONFIG = "ssl.enabled.protocols"; + public static final String SSL_ENABLED_PROTOCOLS_DOC = "The list of protocols enabled for SSL connections. " + + "Default value is the list of protocols enabled for the Java Virtual Machine."; + + + public static final String SSL_KEYSTORE_TYPE_CONFIG = "ssl.keystore.type"; + public static final String SSL_KEYSTORE_TYPE_DOC = "The file format of the key store file. " + + "Default value is the default key store format of the Java Virtual Machine."; + + public static final String SSL_KEYSTORE_LOCATION_CONFIG = "ssl.keystore.location"; + public static final String SSL_KEYSTORE_LOCATION_DOC = "The location of the key store file. 
" + + "This is optional for Client and can be used for two-way authentication for client."; + + public static final String SSL_KEYSTORE_PASSWORD_CONFIG = "ssl.keystore.password"; + public static final String SSL_KEYSTORE_PASSWORD_DOC = "The store password for the key store file. "; + + + public static final String SSL_KEY_PASSWORD_CONFIG = "ssl.key.password"; + public static final String SSL_KEY_PASSWORD_DOC = "The password of the private key in the key store file. " + + "This is optional for client."; + + public static final String SSL_TRUSTSTORE_TYPE_CONFIG = "ssl.truststore.type"; + public static final String SSL_TRUSTSTORE_TYPE_DOC = "The file format of the trust store file. " + + "Default value is JKS."; + + public static final String SSL_TRUSTSTORE_LOCATION_CONFIG = "ssl.truststore.location"; + public static final String SSL_TRUSTSTORE_LOCATION_DOC = "The location of the trust store file. "; + + public static final String SSL_TRUSTSTORE_PASSWORD_CONFIG = "ssl.truststore.password"; + public static final String SSL_TRUSTSTORE_PASSWORD_DOC = "The password for the trust store file. "; + + public static final String SSL_CLIENT_REQUIRE_CERT_CONFIG = "ssl.client.require.cert"; + public static final String SSL_CLIENT_REQUIRE_CERT_DOC = "This is to enforce two-way authentication between client and server." + + "Default value is false. If set to true client need to prover Keystrore releated config"; + + public static final String SSL_KEYMANAGER_ALGORITHM_CONFIG = "ssl.keymanager.algorithm"; + public static final String SSL_KEYMANAGER_ALGORITHM_DOC = "The algorithm used by key manager factory for SSL connections. " + + "Default value is the key manager factory algorithm configured for the Java Virtual Machine."; + + public static final String SSL_TRUSTMANAGER_ALGORITHM_CONFIG = "ssl.trustmanager.algorithm"; + public static final String SSL_TRUSTMANAGER_ALGORITHM_DOC = "The algorithm used by trust manager factory for SSL connections. 
" + + "Default value is the trust manager factory algorithm configured for the Java Virtual Machine."; + + + static { + CONFIG = new ConfigDef().define(SECURITY_PROTOCOL_CONFIG, Type.STRING, SecurityProtocol.PLAINTEXT.toString(), Importance.MEDIUM, SECURITY_PROTOCOL_DOC) + .define(SSL_PROTOCOL_CONFIG, Type.STRING, "TLS", Importance.MEDIUM, SSL_PROTOCOL_DOC) + .define(SSL_CIPHER_SUITES_CONFIG, Type.LIST, Importance.MEDIUM, SSL_CIPHER_SUITES_DOC, false) + .define(SSL_ENABLED_PROTOCOLS_CONFIG, Type.LIST, "TLSv1.2, TLSv1.1, TLSv1", Importance.MEDIUM, SSL_ENABLED_PROTOCOLS_DOC) + .define(SSL_KEYSTORE_TYPE_CONFIG, Type.STRING, Importance.MEDIUM, SSL_KEYSTORE_TYPE_DOC, false) + .define(SSL_KEYSTORE_LOCATION_CONFIG, Type.STRING, Importance.MEDIUM, SSL_KEYSTORE_LOCATION_DOC, false) + .define(SSL_KEYSTORE_PASSWORD_CONFIG, Type.STRING, Importance.MEDIUM, SSL_KEYSTORE_PASSWORD_DOC, false) + .define(SSL_KEY_PASSWORD_CONFIG, Type.STRING, Importance.MEDIUM, SSL_KEY_PASSWORD_DOC, false) + .define(SSL_TRUSTSTORE_TYPE_CONFIG, Type.STRING, Importance.MEDIUM, SSL_TRUSTSTORE_TYPE_DOC, false) + .define(SSL_TRUSTSTORE_LOCATION_CONFIG, Type.STRING, Importance.MEDIUM, SSL_TRUSTSTORE_LOCATION_DOC, false) + .define(SSL_TRUSTSTORE_PASSWORD_CONFIG, Type.STRING, Importance.MEDIUM, SSL_TRUSTSTORE_PASSWORD_DOC, false) + .define(SSL_KEYMANAGER_ALGORITHM_CONFIG, Type.STRING, Importance.MEDIUM, SSL_KEYMANAGER_ALGORITHM_DOC, false) + .define(SSL_TRUSTMANAGER_ALGORITHM_CONFIG, Type.STRING, Importance.MEDIUM, SSL_TRUSTMANAGER_ALGORITHM_DOC, false) + .define(SSL_CLIENT_REQUIRE_CERT_CONFIG, Type.BOOLEAN, false, Importance.MEDIUM, SSL_CLIENT_REQUIRE_CERT_DOC); + } + + public SecurityConfig(Map props) { + super(CONFIG, props); + } + + +} diff --git a/clients/src/main/java/org/apache/kafka/common/network/Authenticator.java b/clients/src/main/java/org/apache/kafka/common/network/Authenticator.java new file mode 100644 index 0000000..ee8516f --- /dev/null +++ b/clients/src/main/java/org/apache/kafka/common/network/Authenticator.java @@ -0,0 +1,60 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.kafka.common.network; + +/* + * Authentication for Channel + */ + +import java.io.IOException; +import com.sun.security.auth.UserPrincipal; + + +public interface Authenticator { + + /** + * Closes this channel + * + * @throws IOException if any I/O error occurs + */ + void close() throws IOException; + + /** + * + * @throws IOException + */ + void init() throws IOException; + + /** + * Returns UserPrincipal after authentication is established + */ + UserPrincipal userPrincipal(); + + + /** + * Does authentication and returns SelectionKey.OP if further communication needed + */ + int authenticate(boolean read, boolean write) throws IOException; + + /** + * returns true if authentication is complete otherwise returns false; + */ + + boolean isComplete(); + +} diff --git a/clients/src/main/java/org/apache/kafka/common/network/Channel.java b/clients/src/main/java/org/apache/kafka/common/network/Channel.java new file mode 100644 index 0000000..3526ba3 --- /dev/null +++ b/clients/src/main/java/org/apache/kafka/common/network/Channel.java @@ -0,0 +1,133 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.kafka.common.network; + + +import java.io.IOException; +import java.io.DataInputStream; +import java.io.DataOutputStream; + +import java.nio.ByteBuffer; +import java.nio.channels.ScatteringByteChannel; +import java.nio.channels.GatheringByteChannel; +import java.nio.channels.SocketChannel; + +import com.sun.security.auth.UserPrincipal; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * + */ + +public class Channel implements ScatteringByteChannel, GatheringByteChannel { + private static final Logger log = LoggerFactory.getLogger(Channel.class); + private TransportLayer transportLayer; + private Authenticator authenticator; + + + public Channel(TransportLayer transportLayer, Authenticator authenticator) throws IOException { + this.transportLayer = transportLayer; + this.authenticator = authenticator; + this.authenticator.init(); + } + + public void close() throws IOException { + transportLayer.close(); + authenticator.close(); + } + + /** + * returns user principal for the session + * Incase of PLAINTEXT and No Authentication returns ANONYMOUS as the userPrincipal + * If SSL used without any SASL Authentication returns SSLSession.peerPrincipal + */ + public UserPrincipal userPrincipal() { + return authenticator.userPrincipal(); + } + + public int connect(boolean read, boolean write) throws IOException { + if (transportLayer.isReady() && authenticator.isComplete()) + return 0; + int status = 0; + if (!transportLayer.isReady()) + status = transportLayer.handshake(read, write); + if (status == 0 && !authenticator.isComplete()) + status = authenticator.authenticate(read, write); + return status; + } + + + public boolean isOpen() { + return transportLayer.isOpen(); + } + + public SocketChannel socketChannel() { + return transportLayer.socketChannel(); + } + + /** + * Writes a sequence of bytes to this channel from the given buffer. + */ + @Override + public int write(ByteBuffer src) throws IOException { + return transportLayer.write(src); + } + + @Override + public long write(ByteBuffer[] srcs) throws IOException { + return transportLayer.write(srcs); + } + + @Override + public long write(ByteBuffer[] srcs, int offset, int length) throws IOException { + return transportLayer.write(srcs, offset, length); + } + + @Override + public int read(ByteBuffer dst) throws IOException { + return transportLayer.read(dst); + } + + @Override + public long read(ByteBuffer[] dsts) throws IOException { + return transportLayer.read(dsts); + } + + @Override + public long read(ByteBuffer[] dsts, int offset, int length) throws IOException { + return transportLayer.read(dsts, offset, length); + } + + public boolean finishConnect() throws IOException { + return transportLayer.finishConnect(); + } + + public boolean isReady() { + return transportLayer.isReady() && authenticator.isComplete(); + } + + public DataInputStream getInputStream() throws IOException { + return transportLayer.inStream(); + } + + public DataOutputStream getOutputStream() throws IOException { + return transportLayer.outStream(); + } +} diff --git a/clients/src/main/java/org/apache/kafka/common/network/DefaultAuthenticator.java b/clients/src/main/java/org/apache/kafka/common/network/DefaultAuthenticator.java new file mode 100644 index 0000000..c1ec794 --- /dev/null +++ b/clients/src/main/java/org/apache/kafka/common/network/DefaultAuthenticator.java @@ -0,0 +1,46 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. 
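
Channel.connect(read, write) folds the transport-layer handshake and authentication into one non-blocking state machine: it returns 0 once both have finished, otherwise a SelectionKey interest-ops value that the caller should re-register for. A minimal sketch of how a poll loop can drive it (the helper class below is hypothetical, not part of the patch):

    import java.io.IOException;
    import java.nio.channels.SelectionKey;

    import org.apache.kafka.common.network.Channel;

    final class HandshakeDriver {
        /** Returns true once the channel is ready for application traffic. */
        static boolean drive(Channel channel, SelectionKey key) throws IOException {
            int status = channel.connect(key.isReadable(), key.isWritable());
            if (status == 0) {
                key.interestOps(SelectionKey.OP_READ); // handshake and authentication are done
                return true;
            }
            key.interestOps(status); // re-register for whatever the handshake still needs
            return false;
        }
    }
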
See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.kafka.common.network; + +import com.sun.security.auth.UserPrincipal; +import java.io.IOException; + +public class DefaultAuthenticator implements Authenticator { + + TransportLayer transportLayer; + + public DefaultAuthenticator(TransportLayer transportLayer) { + this.transportLayer = transportLayer; + } + + public void init() {} + + public int authenticate(boolean read, boolean write) throws IOException { + return 0; + } + + public UserPrincipal userPrincipal() { + return new UserPrincipal(transportLayer.getPeerPrincipal().toString()); + } + + public void close() throws IOException {} + + public boolean isComplete() { + return true; + } +} diff --git a/clients/src/main/java/org/apache/kafka/common/network/PlainTextTransportLayer.java b/clients/src/main/java/org/apache/kafka/common/network/PlainTextTransportLayer.java new file mode 100644 index 0000000..11cd80c --- /dev/null +++ b/clients/src/main/java/org/apache/kafka/common/network/PlainTextTransportLayer.java @@ -0,0 +1,148 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
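
DefaultAuthenticator negotiates nothing of its own and simply reports the transport layer's peer principal, so the Authenticator interface is the extension point for richer schemes. As a sketch of an alternative implementation (hypothetical, not in the patch), the same idea with a null-safe fallback to ANONYMOUS:

    package org.apache.kafka.common.network;

    import java.io.IOException;
    import java.security.Principal;

    import com.sun.security.auth.UserPrincipal;

    public class PeerPrincipalAuthenticator implements Authenticator {

        private final TransportLayer transportLayer;

        public PeerPrincipalAuthenticator(TransportLayer transportLayer) {
            this.transportLayer = transportLayer;
        }

        public void init() throws IOException {}

        // Nothing to negotiate beyond the transport-layer handshake.
        public int authenticate(boolean read, boolean write) throws IOException {
            return 0;
        }

        public UserPrincipal userPrincipal() {
            Principal peer = transportLayer.getPeerPrincipal();
            return new UserPrincipal(peer != null ? peer.getName() : "ANONYMOUS");
        }

        public boolean isComplete() {
            return true;
        }

        public void close() throws IOException {}
    }
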
+ */ + +package org.apache.kafka.common.network; + +/* + * Transport layer for PLAINTEXT communication + */ + +import java.io.IOException; +import java.nio.ByteBuffer; +import java.nio.channels.SocketChannel; + +import java.io.DataInputStream; +import java.io.DataOutputStream; + +import java.security.Principal; +import com.sun.security.auth.UserPrincipal; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +public class PlainTextTransportLayer implements TransportLayer { + private static final Logger log = LoggerFactory.getLogger(PlainTextTransportLayer.class); + SocketChannel socketChannel = null; + DataInputStream inStream = null; + DataOutputStream outStream = null; + + public PlainTextTransportLayer(SocketChannel socketChannel) throws IOException { + this.socketChannel = socketChannel; + + } + + + /** + * Closes this channel + * + * @throws IOException If and I/O error occurs + */ + public void close() throws IOException { + socketChannel.socket().close(); + socketChannel.close(); + } + + /** + * Flushes the buffer to the network, non blocking + * @param buf ByteBuffer + * @return boolean true if the buffer has been emptied out, false otherwise + * @throws IOException + */ + public boolean flush(ByteBuffer buf) throws IOException { + int remaining = buf.remaining(); + if (remaining > 0) { + int written = socketChannel.write(buf); + return written >= remaining; + } + return true; + } + + /** + * Tells wheter or not this channel is open. + */ + public boolean isOpen() { + return socketChannel.isOpen(); + } + + /** + * Writes a sequence of bytes to this channel from the given buffer. + */ + public int write(ByteBuffer src) throws IOException { + return socketChannel.write(src); + } + + public long write(ByteBuffer[] srcs) throws IOException { + return socketChannel.write(srcs); + } + + public long write(ByteBuffer[] srcs, int offset, int length) throws IOException { + return socketChannel.write(srcs, offset, length); + } + + public int read(ByteBuffer dst) throws IOException { + return socketChannel.read(dst); + } + + public long read(ByteBuffer[] dsts) throws IOException { + return socketChannel.read(dsts); + } + + public long read(ByteBuffer[] dsts, int offset, int length) throws IOException { + return socketChannel.read(dsts, offset, length); + } + + public boolean isReady() { + return true; + } + + public SocketChannel socketChannel() { + return socketChannel; + } + + public boolean finishConnect() throws IOException { + return socketChannel.finishConnect(); + } + + /** + * Performs SSL handshake hence is a no-op for the non-secure + * implementation + * @param read Unused in non-secure implementation + * @param write Unused in non-secure implementation + * @return Always return 0 + * @throws IOException + */ + public int handshake(boolean read, boolean write) throws IOException { + return 0; + } + + public DataInputStream inStream() throws IOException { + if (inStream == null) + this.inStream = new DataInputStream(socketChannel.socket().getInputStream()); + return inStream; + } + + public DataOutputStream outStream() throws IOException { + if (outStream == null) + this.outStream = new DataOutputStream(socketChannel.socket().getOutputStream()); + return outStream; + } + + public Principal getPeerPrincipal() { + return new UserPrincipal("ANONYMOUS"); + } + +} diff --git a/clients/src/main/java/org/apache/kafka/common/network/SSLFactory.java b/clients/src/main/java/org/apache/kafka/common/network/SSLFactory.java new file mode 100644 index 0000000..9cf9051 --- /dev/null +++ 
b/clients/src/main/java/org/apache/kafka/common/network/SSLFactory.java @@ -0,0 +1,188 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.kafka.common.network; + +import java.io.FileInputStream; +import java.io.IOException; +import java.security.GeneralSecurityException; +import java.security.KeyStore; + +import javax.net.ssl.*; + +import org.apache.kafka.common.KafkaException; +import org.apache.kafka.common.config.SecurityConfig; + + +public class SSLFactory { + + public enum Mode { CLIENT, SERVER }; + private String protocol; + private String provider; + private String kmfAlgorithm; + private String tmfAlgorithm; + private SecurityStore keystore = null; + private String keyPassword; + private SecurityStore truststore; + private String[] cipherSuites; + private String[] enabledProtocols; + private SSLContext sslContext; + private boolean requireClientCert; + private Mode mode; + + + public SSLFactory(Mode mode) { + this.mode = mode; + } + + + public void init(SecurityConfig securityConfig) throws IOException, GeneralSecurityException { + this.protocol = securityConfig.getString(SecurityConfig.SSL_PROTOCOL_CONFIG); + if (securityConfig.getList(SecurityConfig.SSL_CIPHER_SUITES_CONFIG) != null) + this.cipherSuites = (String[]) securityConfig.getList(SecurityConfig.SSL_CIPHER_SUITES_CONFIG).toArray(); + if (securityConfig.getList(SecurityConfig.SSL_ENABLED_PROTOCOLS_CONFIG) != null) + this.enabledProtocols = (String[]) securityConfig.getList(SecurityConfig.SSL_ENABLED_PROTOCOLS_CONFIG).toArray(); + this.requireClientCert = securityConfig.getBoolean(SecurityConfig.SSL_CLIENT_REQUIRE_CERT_CONFIG); + this.kmfAlgorithm = securityConfig.getString(SecurityConfig.SSL_KEYMANAGER_ALGORITHM_CONFIG); + this.tmfAlgorithm = securityConfig.getString(SecurityConfig.SSL_TRUSTMANAGER_ALGORITHM_CONFIG); + if ((mode == Mode.CLIENT && requireClientCert) || (mode == Mode.SERVER)) + createKeystore(securityConfig.getString(SecurityConfig.SSL_KEYSTORE_TYPE_CONFIG), + securityConfig.getString(SecurityConfig.SSL_KEYSTORE_LOCATION_CONFIG), + securityConfig.getString(SecurityConfig.SSL_KEYSTORE_PASSWORD_CONFIG), + securityConfig.getString(SecurityConfig.SSL_KEY_PASSWORD_CONFIG)); + createTruststore(securityConfig.getString(SecurityConfig.SSL_TRUSTSTORE_TYPE_CONFIG), + securityConfig.getString(SecurityConfig.SSL_TRUSTSTORE_LOCATION_CONFIG), + securityConfig.getString(SecurityConfig.SSL_TRUSTSTORE_PASSWORD_CONFIG)); + + this.sslContext = createSSLContext(); + + } + + + private SSLContext createSSLContext() throws GeneralSecurityException, IOException { + SSLContext sslContext; + if (provider != null) + sslContext = SSLContext.getInstance(protocol, provider); + else + sslContext = SSLContext.getInstance(protocol); + + KeyManager[] keyManagers = null; + 
if (keystore != null) { + String kmfAlgorithm = this.kmfAlgorithm != null ? this.kmfAlgorithm : KeyManagerFactory.getDefaultAlgorithm(); + KeyManagerFactory kmf = KeyManagerFactory.getInstance(kmfAlgorithm); + KeyStore ks = keystore.load(); + String keyPassword = this.keyPassword != null ? this.keyPassword : keystore.password; + kmf.init(ks, keyPassword.toCharArray()); + keyManagers = kmf.getKeyManagers(); + } + + String tmfAlgorithm = this.tmfAlgorithm != null ? this.tmfAlgorithm : TrustManagerFactory.getDefaultAlgorithm(); + TrustManagerFactory tmf = TrustManagerFactory.getInstance(tmfAlgorithm); + KeyStore ts = truststore == null ? null : truststore.load(); + tmf.init(ts); + + sslContext.init(keyManagers, tmf.getTrustManagers(), null); + return sslContext; + } + + public SSLEngine createSSLEngine(String peerHost, int peerPort) { + SSLEngine sslEngine = sslContext.createSSLEngine(peerHost, peerPort); + if (cipherSuites != null) sslEngine.setEnabledCipherSuites(cipherSuites); + if (mode == Mode.SERVER) { + sslEngine.setUseClientMode(false); + } else { + sslEngine.setUseClientMode(true); + sslEngine.setNeedClientAuth(requireClientCert); + } + if (enabledProtocols != null) sslEngine.setEnabledProtocols(enabledProtocols); + return sslEngine; + } + + /** + * Returns a configured SSLServerSocketFactory. + * + * @return the configured SSLSocketFactory. + * @throws GeneralSecurityException thrown if the SSLSocketFactory could not + * be initialized. + * @throws IOException thrown if and IO error occurred while loading + * the server keystore. + */ + public SSLServerSocketFactory createSSLServerSocketFactory() throws GeneralSecurityException, IOException { + if (mode != Mode.SERVER) { + throw new IllegalStateException("Factory is in CLIENT mode"); + } + return sslContext.getServerSocketFactory(); + } + + /** + * Returns if client certificates are required or not. + * + * @return if client certificates are required or not. + */ + public boolean isClientCertRequired() { + return requireClientCert; + } + + + private void createKeystore(String type, String path, String password, String keyPassword) { + if (path == null && password != null) { + throw new KafkaException("SSL key store password is not specified."); + } else if (path != null && password == null) { + throw new KafkaException("SSL key store is not specified, but key store password is specified."); + } else if (path != null && password != null) { + this.keystore = new SecurityStore(type, path, password); + this.keyPassword = keyPassword; + } + } + + private void createTruststore(String type, String path, String password) { + if (path == null && password != null) { + throw new KafkaException("SSL key store password is not specified."); + } else if (path != null && password == null) { + throw new KafkaException("SSL key store is not specified, but key store password is specified."); + } else if (path != null && password != null) { + this.truststore = new SecurityStore(type, path, password); + } + } + + + private class SecurityStore { + private final String type; + private final String path; + private final String password; + + private SecurityStore(String type, String path, String password) { + this.type = type == null ? 
KeyStore.getDefaultType() : type; + this.path = path; + this.password = password; + } + + private KeyStore load() throws GeneralSecurityException, IOException { + FileInputStream in = null; + try { + KeyStore ks = KeyStore.getInstance(type); + in = new FileInputStream(path); + ks.load(in, password.toCharArray()); + return ks; + } finally { + if (in != null) in.close(); + } + } + } + + public void close() { + } +} diff --git a/clients/src/main/java/org/apache/kafka/common/network/SSLTransportLayer.java b/clients/src/main/java/org/apache/kafka/common/network/SSLTransportLayer.java new file mode 100644 index 0000000..dc84975 --- /dev/null +++ b/clients/src/main/java/org/apache/kafka/common/network/SSLTransportLayer.java @@ -0,0 +1,430 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.kafka.common.network; + +import java.io.IOException; +import java.nio.ByteBuffer; +import java.nio.channels.SocketChannel; +import java.nio.channels.SelectionKey; + +import java.security.Principal; +import javax.net.ssl.SSLEngine; +import javax.net.ssl.SSLEngineResult; +import javax.net.ssl.SSLEngineResult.HandshakeStatus; +import javax.net.ssl.SSLEngineResult.Status; + +import java.io.DataInputStream; +import java.io.DataOutputStream; + +import org.apache.kafka.common.utils.Utils; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/* + * Transport layer for SSL communication + */ + +public class SSLTransportLayer implements TransportLayer { + private static final Logger log = LoggerFactory.getLogger(SSLTransportLayer.class); + SocketChannel socketChannel; + SSLEngine sslEngine; + HandshakeStatus handshakeStatus = null; + SSLEngineResult handshakeResult = null; + boolean handshakeComplete = false; + boolean closed = false; + boolean closing = false; + ByteBuffer netInBuffer = null; + ByteBuffer netOutBuffer = null; + ByteBuffer appReadBuffer = null; + ByteBuffer appWriteBuffer = null; + ByteBuffer emptyBuf = ByteBuffer.allocate(0); + DataInputStream inStream = null; + DataOutputStream outStream = null; + + + public SSLTransportLayer(SocketChannel socketChannel, SSLEngine sslEngine) throws IOException { + this.socketChannel = socketChannel; + this.sslEngine = sslEngine; + this.netInBuffer = ByteBuffer.allocateDirect(sslEngine.getSession().getPacketBufferSize()); + this.netOutBuffer = ByteBuffer.allocateDirect(sslEngine.getSession().getPacketBufferSize()); + this.appWriteBuffer = ByteBuffer.allocateDirect(sslEngine.getSession().getApplicationBufferSize()); + this.appReadBuffer = ByteBuffer.allocateDirect(sslEngine.getSession().getApplicationBufferSize()); + startHandshake(); + } + + public void startHandshake() throws IOException { + netOutBuffer.position(0); + netOutBuffer.limit(0); + netInBuffer.position(0); + 
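
Underneath, SSLFactory.createSSLContext() and createSSLEngine() reduce to the standard JSSE sequence: load the trust (and optionally key) material, initialize an SSLContext, and create an engine in client or server mode. A standalone client-side sketch of those calls (trust store path, password, and broker host below are hypothetical):

    import java.io.FileInputStream;
    import java.security.KeyStore;
    import java.util.Arrays;

    import javax.net.ssl.SSLContext;
    import javax.net.ssl.SSLEngine;
    import javax.net.ssl.TrustManagerFactory;

    public class ClientSSLEngineSketch {
        public static void main(String[] args) throws Exception {
            // Load the trust store, as SSLFactory's SecurityStore.load() does.
            KeyStore ts = KeyStore.getInstance("JKS");
            FileInputStream in = new FileInputStream("/tmp/client.truststore.jks");
            try {
                ts.load(in, "changeit".toCharArray());
            } finally {
                in.close();
            }

            TrustManagerFactory tmf = TrustManagerFactory.getInstance(TrustManagerFactory.getDefaultAlgorithm());
            tmf.init(ts);

            // No key managers: one-way authentication, the client only verifies the broker.
            SSLContext context = SSLContext.getInstance("TLS");
            context.init(null, tmf.getTrustManagers(), null);

            SSLEngine engine = context.createSSLEngine("broker.example.com", 9093);
            engine.setUseClientMode(true);
            System.out.println(Arrays.toString(engine.getEnabledProtocols()));
        }
    }
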
netInBuffer.limit(0); + handshakeComplete = false; + closed = false; + closing = false; + //initiate handshake + sslEngine.beginHandshake(); + handshakeStatus = sslEngine.getHandshakeStatus(); + } + + public SocketChannel socketChannel() { + return socketChannel; + } + + public boolean finishConnect() throws IOException { + return socketChannel.finishConnect(); + } + + /** + * Flushes the buffer to the network, non blocking + * @param buf ByteBuffer + * @return boolean true if the buffer has been emptied out, false otherwise + * @throws IOException + */ + public boolean flush(ByteBuffer buf) throws IOException { + int remaining = buf.remaining(); + if (remaining > 0) { + int written = socketChannel.write(buf); + return written >= remaining; + } + return true; + } + + /** + * Performs SSL handshake, non blocking. + * The return for this operation is 0 if the handshake is complete and a positive value if it is not complete. + * In the event of a positive value coming back, re-register the selection key for the return values interestOps. + * @param read boolean - true if the underlying channel is readable + * @param write boolean - true if the underlying channel is writable + * @return int - 0 if hand shake is complete, otherwise it returns a SelectionKey interestOps value + * @throws IOException + */ + public int handshake(boolean read, boolean write) throws IOException { + if (handshakeComplete) return 0; //we have done our initial handshake + + if (!flush(netOutBuffer)) return SelectionKey.OP_WRITE; + + switch(handshakeStatus) { + case NOT_HANDSHAKING: + // SSLEnginge.getHandshakeStatus is transient and it doesn't record FINISHED status properly + if (handshakeResult.getHandshakeStatus() == HandshakeStatus.FINISHED) { + handshakeComplete = !netOutBuffer.hasRemaining(); + if (handshakeComplete) + return 0; + else + return SelectionKey.OP_WRITE; + } else { + //should never happen + throw new IOException("NOT_HANDSHAKING during handshake"); + } + case FINISHED: + //we are complete if we have delivered the last package + handshakeComplete = !netOutBuffer.hasRemaining(); + //return 0 if we are complete, otherwise we still have data to write + if (handshakeComplete) return 0; + else return SelectionKey.OP_WRITE; + case NEED_WRAP: + handshakeResult = handshakeWrap(write); + if (handshakeResult.getStatus() == Status.OK) { + if (handshakeStatus == HandshakeStatus.NEED_TASK) + handshakeStatus = tasks(); + } else { + //wrap should always work with our buffers + throw new IOException("Unexpected status [" + handshakeResult.getStatus() + "] during handshake WRAP."); + } + if (handshakeStatus != HandshakeStatus.NEED_UNWRAP || (!flush(netOutBuffer))) + return SelectionKey.OP_WRITE; + //fall down to NEED_UNWRAP on the same call, will result in a + //BUFFER_UNDERFLOW if it needs data + case NEED_UNWRAP: + handshakeResult = handshakeUnwrap(read); + if (handshakeResult.getStatus() == Status.OK) { + if (handshakeStatus == HandshakeStatus.NEED_TASK) + handshakeStatus = tasks(); + } else if (handshakeResult.getStatus() == Status.BUFFER_UNDERFLOW) { + return SelectionKey.OP_READ; + } else { + throw new IOException(String.format("Unexpected status [%s] during handshake UNWRAP", handshakeStatus)); + } + break; + case NEED_TASK: + handshakeStatus = tasks(); + break; + default: + throw new IllegalStateException(String.format("Unexpected status [%s]", handshakeStatus)); + } + //return 0 if we are complete, otherwise re-register for any activity that + //would cause this method to be called again. 
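
For reference, the NEED_WRAP / NEED_UNWRAP / NEED_TASK cycle that handshake() drives in non-blocking form looks like this when written against a blocking SocketChannel (a standalone sketch, not part of the patch):

    import java.io.IOException;
    import java.nio.ByteBuffer;
    import java.nio.channels.SocketChannel;

    import javax.net.ssl.SSLEngine;
    import javax.net.ssl.SSLEngineResult;
    import javax.net.ssl.SSLEngineResult.HandshakeStatus;

    public final class BlockingHandshakeSketch {

        public static void handshake(SocketChannel channel, SSLEngine engine) throws IOException {
            ByteBuffer netOut = ByteBuffer.allocate(engine.getSession().getPacketBufferSize());
            ByteBuffer netIn = ByteBuffer.allocate(engine.getSession().getPacketBufferSize());
            ByteBuffer appIn = ByteBuffer.allocate(engine.getSession().getApplicationBufferSize());
            ByteBuffer empty = ByteBuffer.allocate(0);

            engine.beginHandshake();
            HandshakeStatus status = engine.getHandshakeStatus();
            while (status != HandshakeStatus.FINISHED && status != HandshakeStatus.NOT_HANDSHAKING) {
                switch (status) {
                    case NEED_WRAP: {
                        // Produce the next handshake record and push it onto the wire.
                        netOut.clear();
                        engine.wrap(empty, netOut);
                        netOut.flip();
                        while (netOut.hasRemaining())
                            channel.write(netOut);
                        break;
                    }
                    case NEED_UNWRAP: {
                        // Consume buffered handshake bytes; read more only on underflow.
                        netIn.flip();
                        SSLEngineResult result = engine.unwrap(netIn, appIn);
                        netIn.compact();
                        if (result.getStatus() == SSLEngineResult.Status.BUFFER_UNDERFLOW
                                && channel.read(netIn) < 0)
                            throw new IOException("EOF during handshake.");
                        break;
                    }
                    case NEED_TASK: {
                        // Run delegated tasks (certificate checks etc.) on this thread.
                        Runnable task;
                        while ((task = engine.getDelegatedTask()) != null)
                            task.run();
                        break;
                    }
                    default:
                        throw new IllegalStateException("Unexpected status " + status);
                }
                status = engine.getHandshakeStatus();
            }
        }
    }

The non-blocking version in this class has to surrender control instead of looping, which is why it reports the SelectionKey ops it still needs back to the Selector.
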
+ if (handshakeComplete) return 0; + else return SelectionKey.OP_WRITE | SelectionKey.OP_READ; + } + + /** + * Executes all the tasks needed on the same thread. + * @return HandshakeStatus + */ + private HandshakeStatus tasks() { + Runnable r = null; + while ((r = sslEngine.getDelegatedTask()) != null) r.run(); + return sslEngine.getHandshakeStatus(); + } + + /** + * Performs the WRAP function + * @param doWrite boolean + * @return SSLEngineResult + * @throws IOException + */ + private SSLEngineResult handshakeWrap(Boolean doWrite) throws IOException { + //this should never be called with a network buffer that contains data + //so we can clear it here. + netOutBuffer.clear(); + SSLEngineResult result = sslEngine.wrap(appWriteBuffer, netOutBuffer); + //prepare the results to be written + netOutBuffer.flip(); + handshakeStatus = result.getHandshakeStatus(); + //optimization, if we do have a writable channel, write it now + if (doWrite) flush(netOutBuffer); + return result; + } + + /** + * Perform handshake unwrap + * @param doRead boolean + * @return SSLEngineResult + * @throws IOException + */ + private SSLEngineResult handshakeUnwrap(Boolean doRead) throws IOException { + if (netInBuffer.position() == netInBuffer.limit()) { + //clear the buffer if we have emptied it out on data + netInBuffer.clear(); + } + + if (doRead) { + int read = socketChannel.read(netInBuffer); + if (read == -1) throw new IOException("EOF during handshake."); + } + + SSLEngineResult result; + boolean cont = false; + do { + //prepare the buffer with the incoming data + netInBuffer.flip(); + result = sslEngine.unwrap(netInBuffer, appWriteBuffer); + netInBuffer.compact(); + handshakeStatus = result.getHandshakeStatus(); + if (result.getStatus() == SSLEngineResult.Status.OK && + result.getHandshakeStatus() == HandshakeStatus.NEED_TASK) { + handshakeStatus = tasks(); + } + cont = result.getStatus() == SSLEngineResult.Status.OK && + handshakeStatus == HandshakeStatus.NEED_UNWRAP; + } while(cont); + return result; + } + + + public int getOutboundRemaining() { + return netOutBuffer.remaining(); + } + + /** + * Sends a SSL close message, will not physically close the connection here.
+ * @throws IOException if an I/O error occurs + * @throws IOException if there is data on the outgoing network buffer and we are unable to flush it + */ + public void close() throws IOException { + if (closing) return; + closing = true; + sslEngine.closeOutbound(); + + if (!flush(netOutBuffer)) { + throw new IOException("Remaining data in the network buffer, can't send SSL close message, force a close with close(true) instead"); + } + //prep the buffer for the close message + netOutBuffer.clear(); + //perform the close, since we called sslEngine.closeOutbound + SSLEngineResult handshake = sslEngine.wrap(emptyBuf, netOutBuffer); + //we should be in a close state + if (handshake.getStatus() != SSLEngineResult.Status.CLOSED) { + throw new IOException("Invalid close state, will not send network data."); + } + netOutBuffer.flip(); + flush(netOutBuffer); + socketChannel.socket().close(); + socketChannel.close(); + closed = !netOutBuffer.hasRemaining() && (handshake.getHandshakeStatus() != HandshakeStatus.NEED_WRAP); + } + + public boolean isOpen() { + return socketChannel.isOpen(); + } + + public boolean isReady() { + return handshakeComplete; + } + + /** + * Reads a sequence of bytes from this channel into the given buffer. + * + * @param dst The buffer into which bytes are to be transferred + * @return The number of bytes read, possible zero or -1 if the channel has reached end-of-stream + * @throws IOException if some other I/O error occurs + * @throws IllegalStateException if the destination buffer is different than appBufHandler.getReadBuffer() + */ + public int read(ByteBuffer dst) throws IOException { + if (closing || closed) return -1; + if (!handshakeComplete) throw new IllegalStateException("Handshake incomplete."); + netInBuffer = Utils.ensureCapacity(netInBuffer, packetBufferSize()); + int netread = socketChannel.read(netInBuffer); + if (netread == -1) return -1; + int read = 0; + SSLEngineResult unwrap = null; + + do { + netInBuffer.flip(); + unwrap = sslEngine.unwrap(netInBuffer, appReadBuffer); + //compact the buffer + netInBuffer.compact(); + if (unwrap.getStatus() == Status.OK || unwrap.getStatus() == Status.BUFFER_UNDERFLOW) { + read += unwrap.bytesProduced(); + // perform any task if needed + if (unwrap.getHandshakeStatus() == HandshakeStatus.NEED_TASK) tasks(); + //if we need more network data, than return for now. + if (unwrap.getStatus() == Status.BUFFER_UNDERFLOW) return readFromAppBuffer(dst); + } else if (unwrap.getStatus() == Status.BUFFER_OVERFLOW && read > 0) { + appReadBuffer = Utils.ensureCapacity(appReadBuffer, applicationBufferSize()); + //buffer overflow can happen, if we have read data, then + //empty out the dst buffer before we do another read + return readFromAppBuffer(dst); + } else { + //here we should trap BUFFER_OVERFLOW and call expand on the buffer + // for now, throw an exception, as we initialized the buffers + // in constructor + throw new IOException(String.format("Unable to unwrap data, invalid status [%s]", unwrap.getStatus())); + } + } while(netInBuffer.position() != 0); + return readFromAppBuffer(dst); + } + + public long read(ByteBuffer[] dsts) throws IOException { + return read(dsts, 0, dsts.length); + } + + public long read(ByteBuffer[] dsts, int offset, int length) throws IOException { + int totalRead = 0; + for (int i = offset; i < length; i++) { + int read = read(dsts[i]); + if (read > 0) { + totalRead += read; + } + } + return totalRead; + } + + + /** + * Writes a sequence of bytes to this channel from the given buffer. 
+ * + * @param src The buffer from which bytes are to be retrieved + * @return The number of bytes written, possibly zero + * @throws IOException If some other I/O error occurs + */ + + public int write(ByteBuffer src) throws IOException { + int written = 0; + if (src == this.netOutBuffer) + written = socketChannel.write(src); + else { + if (closing || closed) throw new IOException("Channel is in closing state"); + if (!flush(netOutBuffer)) + return written; + netOutBuffer.clear(); + SSLEngineResult result = sslEngine.wrap(src, netOutBuffer); + written = result.bytesConsumed(); + netOutBuffer.flip(); + if (result.getStatus() == Status.OK) { + if (result.getHandshakeStatus() == HandshakeStatus.NEED_TASK) + tasks(); + } else { + throw new IOException(String.format("Unable to wrap data, invalid status %s", result.getStatus())); + } + flush(netOutBuffer); + } + return written; + } + + public long write(ByteBuffer[] srcs, int offset, int length) throws IOException { + int totalWritten = 0; + for (int i = offset; i < length; i++) { + if (srcs[i].hasRemaining()) { + int written = write(srcs[i]); + if (written > 0) { + totalWritten += written; + } + } + } + return totalWritten; + } + + public long write(ByteBuffer[] srcs) throws IOException { + return write(srcs, 0, srcs.length); + } + + public DataInputStream inStream() throws IOException { + if (inStream == null) + this.inStream = new DataInputStream(socketChannel.socket().getInputStream()); + return inStream; + } + + public DataOutputStream outStream() throws IOException { + if (outStream == null) + this.outStream = new DataOutputStream(socketChannel.socket().getOutputStream()); + return outStream; + } + + public Principal getPeerPrincipal() { + //return sslEngine.getSession().getPeerPrincipal(); + return null; + } + + private int readFromAppBuffer(ByteBuffer dst) { + appReadBuffer.flip(); + try { + int remaining = appReadBuffer.remaining(); + if (remaining > 0) { + if (remaining > dst.remaining()) + remaining = dst.remaining(); + int i = 0; + while (i < remaining) { + dst.put(appReadBuffer.get()); + i++; + } + } + return remaining; + } finally { + appReadBuffer.compact(); + } + } + + private int packetBufferSize() { + return sslEngine.getSession().getPacketBufferSize(); + } + + private int applicationBufferSize() { + return sslEngine.getSession().getApplicationBufferSize(); + } +} diff --git a/clients/src/main/java/org/apache/kafka/common/network/Selectable.java b/clients/src/main/java/org/apache/kafka/common/network/Selectable.java index b5f8d83..4a0c2bd 100644 --- a/clients/src/main/java/org/apache/kafka/common/network/Selectable.java +++ b/clients/src/main/java/org/apache/kafka/common/network/Selectable.java @@ -12,6 +12,7 @@ */ package org.apache.kafka.common.network; + import java.io.IOException; import java.net.InetSocketAddress; import java.util.List; diff --git a/clients/src/main/java/org/apache/kafka/common/network/Selector.java b/clients/src/main/java/org/apache/kafka/common/network/Selector.java index 57de058..0068143 100644 --- a/clients/src/main/java/org/apache/kafka/common/network/Selector.java +++ b/clients/src/main/java/org/apache/kafka/common/network/Selector.java @@ -3,9 +3,9 @@ * file distributed with this work for additional information regarding copyright ownership. The ASF licenses this file * to You under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the * License. 
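
read() grows netInBuffer and appReadBuffer via Utils.ensureCapacity; that helper belongs to the Utils.java change listed in the diffstat but its body is not shown in this hunk. A plausible sketch of what it does, based only on how it is called here (an assumption, the real implementation may differ):

    // Would live in org.apache.kafka.common.utils.Utils (assumed shape, not shown in this hunk).
    public static ByteBuffer ensureCapacity(ByteBuffer existingBuffer, int newLength) {
        if (newLength > existingBuffer.capacity()) {
            // Grow the buffer and carry over whatever has been written into it so far.
            ByteBuffer newBuffer = ByteBuffer.allocate(newLength);
            existingBuffer.flip();
            newBuffer.put(existingBuffer);
            return newBuffer;
        }
        return existingBuffer;
    }
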
You may obtain a copy of the License at - * + * * http://www.apache.org/licenses/LICENSE-2.0 - * + * * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the * specific language governing permissions and limitations under the License. @@ -31,6 +31,7 @@ import java.util.Set; import java.util.concurrent.TimeUnit; import org.apache.kafka.common.KafkaException; +import org.apache.kafka.common.config.SecurityConfig; import org.apache.kafka.common.metrics.Measurable; import org.apache.kafka.common.metrics.MetricConfig; import org.apache.kafka.common.MetricName; @@ -40,6 +41,7 @@ import org.apache.kafka.common.metrics.stats.Avg; import org.apache.kafka.common.metrics.stats.Count; import org.apache.kafka.common.metrics.stats.Max; import org.apache.kafka.common.metrics.stats.Rate; +import org.apache.kafka.common.protocol.SecurityProtocol; import org.apache.kafka.common.utils.Time; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -51,25 +53,25 @@ import org.slf4j.LoggerFactory; * responses. *

* A connection can be added to the selector associated with an integer id by doing - * + * *

  * selector.connect(42, new InetSocketAddress("google.com", server.port), 64000, 64000);
  * 
- * + * * The connect call does not block on the creation of the TCP connection, so the connect method only begins initiating * the connection. The successful invocation of this method does not mean a valid connection has been established. - * + * * Sending requests, receiving responses, processing connection completions, and disconnections on the existing * connections are all done using the poll() call. - * + * *
  * List<NetworkRequest> requestsToSend = Arrays.asList(new NetworkRequest(0, myBytes), new NetworkRequest(1, myOtherBytes));
  * selector.poll(TIMEOUT_MS, requestsToSend);
  * 
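As a minimal sketch (not part of the patch itself) of how the per-poll result getters described just below might be drained once poll() returns; handleResponse and scheduleReconnect are hypothetical application callbacks:

    selector.poll(TIMEOUT_MS, requestsToSend);
    for (NetworkReceive receive : selector.completedReceives())
        handleResponse(receive);        // hypothetical application handler
    for (int node : selector.disconnected())
        scheduleReconnect(node);        // hypothetical reconnect bookkeeping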
- * + * * The selector maintains several lists that are reset by each call to poll() which are available via * various getters. These are reset by each call to poll(). - * + * * This class is not thread safe! */ public class Selector implements Selectable { @@ -78,6 +80,7 @@ public class Selector implements Selectable { private final java.nio.channels.Selector selector; private final Map keys; + private final Map channels; private final List completedSends; private final List completedReceives; private final List disconnected; @@ -87,11 +90,14 @@ public class Selector implements Selectable { private final SelectorMetrics sensors; private final String metricGrpPrefix; private final Map metricTags; + private final SecurityConfig securityConfig; + private final SecurityProtocol securityProtocol; + private SSLFactory sslFactory = null; /** * Create a new selector */ - public Selector(Metrics metrics, Time time, String metricGrpPrefix, Map metricTags) { + public Selector(Metrics metrics, Time time, String metricGrpPrefix, Map metricTags, SecurityConfig securityConfig) { try { this.selector = java.nio.channels.Selector.open(); } catch (IOException e) { @@ -101,12 +107,24 @@ public class Selector implements Selectable { this.metricGrpPrefix = metricGrpPrefix; this.metricTags = metricTags; this.keys = new HashMap(); + this.channels = new HashMap(); this.completedSends = new ArrayList(); this.completedReceives = new ArrayList(); this.connected = new ArrayList(); this.disconnected = new ArrayList(); this.failedSends = new ArrayList(); this.sensors = new SelectorMetrics(metrics); + this.securityConfig = securityConfig; + this.securityProtocol = SecurityProtocol.valueOf(securityConfig.getString(SecurityConfig.SECURITY_PROTOCOL_CONFIG)); + try { + if (securityProtocol == SecurityProtocol.SSL) { + this.sslFactory = new SSLFactory(SSLFactory.Mode.CLIENT); + this.sslFactory.init(this.securityConfig); + } + } catch (Exception e) { + throw new KafkaException(e); + } + } /** @@ -127,25 +145,37 @@ public class Selector implements Selectable { if (this.keys.containsKey(id)) throw new IllegalStateException("There is already a connection for id " + id); - SocketChannel channel = SocketChannel.open(); - channel.configureBlocking(false); - Socket socket = channel.socket(); + SocketChannel socketChannel = SocketChannel.open(); + socketChannel.configureBlocking(false); + Socket socket = socketChannel.socket(); socket.setKeepAlive(true); socket.setSendBufferSize(sendBufferSize); socket.setReceiveBufferSize(receiveBufferSize); socket.setTcpNoDelay(true); try { - channel.connect(address); + socketChannel.connect(address); } catch (UnresolvedAddressException e) { - channel.close(); + socketChannel.close(); throw new IOException("Can't resolve address: " + address, e); } catch (IOException e) { - channel.close(); + socketChannel.close(); throw e; } - SelectionKey key = channel.register(this.selector, SelectionKey.OP_CONNECT); + + TransportLayer transportLayer; + if (securityProtocol == SecurityProtocol.SSL) { + transportLayer = new SSLTransportLayer(socketChannel, + sslFactory.createSSLEngine(socket.getInetAddress().getHostName(), + socket.getPort())); + } else { + transportLayer = new PlainTextTransportLayer(socketChannel); + } + Authenticator authenticator = new DefaultAuthenticator(transportLayer); + Channel channel = new Channel(transportLayer, authenticator); + SelectionKey key = socketChannel.register(this.selector, SelectionKey.OP_CONNECT); key.attach(new Transmissions(id)); this.keys.put(id, key); + 
this.channels.put(key, channel); } /** @@ -202,12 +232,12 @@ public class Selector implements Selectable { /** * Do whatever I/O can be done on each connection without blocking. This includes completing connections, completing * disconnections, initiating new sends, or making progress on in-progress sends or receives. - * + * * When this call is completed the user can check for completed sends, receives, connections or disconnects using * {@link #completedSends()}, {@link #completedReceives()}, {@link #connected()}, {@link #disconnected()}. These * lists will be cleared at the beginning of each {@link #poll(long, List)} call and repopulated by the call if any * completed I/O. - * + * * @param timeout The amount of time to wait, in milliseconds. If negative, wait indefinitely. * @throws IllegalStateException If a send is given for which we have no existing connection or for which there is * already an in-progress send @@ -230,7 +260,7 @@ public class Selector implements Selectable { iter.remove(); Transmissions transmissions = transmissions(key); - SocketChannel channel = channel(key); + Channel channel = channel(key); // register all per-broker metrics at once sensors.maybeRegisterNodeMetrics(transmissions.id); @@ -242,29 +272,46 @@ public class Selector implements Selectable { key.interestOps(key.interestOps() & ~SelectionKey.OP_CONNECT | SelectionKey.OP_READ); this.connected.add(transmissions.id); this.sensors.connectionCreated.record(); + } /* read from any connections that have readable data */ if (key.isReadable()) { - if (!transmissions.hasReceive()) - transmissions.receive = new NetworkReceive(transmissions.id); - transmissions.receive.readFrom(channel); - if (transmissions.receive.complete()) { - transmissions.receive.payload().rewind(); - this.completedReceives.add(transmissions.receive); - this.sensors.recordBytesReceived(transmissions.id, transmissions.receive.payload().limit()); - transmissions.clearReceive(); + if (!channel.isReady()) { + int status = channel.connect(key.isReadable(), key.isWritable()); + if (status == 0) + key.interestOps(key.interestOps() & ~SelectionKey.OP_CONNECT | SelectionKey.OP_READ); + else + key.interestOps(status); + } else { + if (!transmissions.hasReceive()) + transmissions.receive = new NetworkReceive(transmissions.id); + transmissions.receive.readFrom(channel); + if (transmissions.receive.complete()) { + transmissions.receive.payload().rewind(); + this.completedReceives.add(transmissions.receive); + this.sensors.recordBytesReceived(transmissions.id, transmissions.receive.payload().limit()); + transmissions.clearReceive(); + } } } /* write to any sockets that have space in their buffer and for which we have data */ if (key.isWritable()) { - transmissions.send.writeTo(channel); - if (transmissions.send.remaining() <= 0) { - this.completedSends.add(transmissions.send); - this.sensors.recordBytesSent(transmissions.id, transmissions.send.size()); - transmissions.clearSend(); - key.interestOps(key.interestOps() & ~SelectionKey.OP_WRITE); + if (!channel.isReady()) { + int status = channel.connect(key.isReadable(), key.isWritable()); + if (status == 0) + key.interestOps(key.interestOps() & ~SelectionKey.OP_CONNECT | SelectionKey.OP_READ); + else + key.interestOps(status); + } else { + transmissions.send.writeTo(channel); + if (transmissions.send.remaining() <= 0) { + this.completedSends.add(transmissions.send); + this.sensors.recordBytesSent(transmissions.id, transmissions.send.size()); + transmissions.clearSend(); + key.interestOps(key.interestOps() & 
~SelectionKey.OP_WRITE); + } } } @@ -287,9 +334,9 @@ public class Selector implements Selectable { long endIo = time.nanoseconds(); this.sensors.ioTime.record(endIo - endSelect, time.milliseconds()); } - - private String socketDescription(SocketChannel channel) { - Socket socket = channel.socket(); + + private String socketDescription(Channel channel) { + Socket socket = channel.socketChannel().socket(); if (socket == null) return "[unconnected socket]"; else if (socket.getInetAddress() != null) @@ -362,7 +409,7 @@ public class Selector implements Selectable { /** * Check for data, waiting up to the given timeout. - * + * * @param ms Length of time to wait, in milliseconds. If negative, wait indefinitely. * @return The number of keys ready * @throws IOException @@ -380,7 +427,8 @@ public class Selector implements Selectable { * Begin closing this connection */ private void close(SelectionKey key) { - SocketChannel channel = channel(key); + Channel channel = channel(key); + this.channels.remove(key); Transmissions trans = transmissions(key); if (trans != null) { this.keys.remove(trans.id); @@ -390,7 +438,6 @@ public class Selector implements Selectable { key.attach(null); key.cancel(); try { - channel.socket().close(); channel.close(); } catch (IOException e) { log.error("Exception closing connection to node {}:", trans.id, e); @@ -418,8 +465,8 @@ public class Selector implements Selectable { /** * Get the socket channel associated with this selection key */ - private SocketChannel channel(SelectionKey key) { - return (SocketChannel) key.channel(); + private Channel channel(SelectionKey key) { + return this.channels.get(key); } /** diff --git a/clients/src/main/java/org/apache/kafka/common/network/TransportLayer.java b/clients/src/main/java/org/apache/kafka/common/network/TransportLayer.java new file mode 100644 index 0000000..6ce013b --- /dev/null +++ b/clients/src/main/java/org/apache/kafka/common/network/TransportLayer.java @@ -0,0 +1,86 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.kafka.common.network; + +/* + * Transport layer for underlying communication + */ +import java.io.IOException; +import java.nio.ByteBuffer; +import java.nio.channels.SocketChannel; + +import java.io.DataInputStream; +import java.io.DataOutputStream; + +import java.security.Principal; + + +public interface TransportLayer { + + /** + * Closes this channel + * + * @throws IOException If and I/O error occurs + */ + void close() throws IOException; + + + /** + * Tells wheather or not this channel is open. + */ + boolean isOpen(); + + /** + * Writes a sequence of bytes to this channel from the given buffer. 
+ */ + int write(ByteBuffer src) throws IOException; + + long write(ByteBuffer[] srcs) throws IOException; + + long write(ByteBuffer[] srcs, int offset, int length) throws IOException; + + int read(ByteBuffer dst) throws IOException; + + long read(ByteBuffer[] dsts) throws IOException; + + long read(ByteBuffer[] dsts, int offset, int length) throws IOException; + + boolean isReady(); + + boolean finishConnect() throws IOException; + + SocketChannel socketChannel(); + + /** + * Performs SSL handshake hence is a no-op for the non-secure + * implementation + * @param read Unused in non-secure implementation + * @param write Unused in non-secure implementation + * @return Always returns 0 + * @throws IOException + */ + int handshake(boolean read, boolean write) throws IOException; + + DataInputStream inStream() throws IOException; + + DataOutputStream outStream() throws IOException; + + boolean flush(ByteBuffer buffer) throws IOException; + + Principal getPeerPrincipal(); +} diff --git a/clients/src/main/java/org/apache/kafka/common/protocol/SecurityProtocol.java b/clients/src/main/java/org/apache/kafka/common/protocol/SecurityProtocol.java index dab1a94..d663f7a 100644 --- a/clients/src/main/java/org/apache/kafka/common/protocol/SecurityProtocol.java +++ b/clients/src/main/java/org/apache/kafka/common/protocol/SecurityProtocol.java @@ -24,6 +24,8 @@ import java.util.Map; public enum SecurityProtocol { /** Un-authenticated, non-encrypted channel */ PLAINTEXT(0, "PLAINTEXT"), + /** SSL channel */ + SSL(1, "SSL"), /** Currently identical to PLAINTEXT and used for testing only. We may implement extra instrumentation when testing channel code. */ TRACE(Short.MAX_VALUE, "TRACE"); diff --git a/clients/src/main/java/org/apache/kafka/common/utils/Utils.java b/clients/src/main/java/org/apache/kafka/common/utils/Utils.java index f73eedb..9382060 100755 --- a/clients/src/main/java/org/apache/kafka/common/utils/Utils.java +++ b/clients/src/main/java/org/apache/kafka/common/utils/Utils.java @@ -479,4 +479,15 @@ public class Utils { public static String readFileAsString(String path) throws IOException { return Utils.readFileAsString(path, Charset.defaultCharset()); } + + public static ByteBuffer ensureCapacity(ByteBuffer existingBuffer, int newLength) { + if (newLength > existingBuffer.capacity()) { + ByteBuffer newBuffer = ByteBuffer.allocate(newLength); + existingBuffer.flip(); + newBuffer.put(existingBuffer); + return newBuffer; + } + return existingBuffer; + } + } diff --git a/clients/src/test/java/org/apache/kafka/common/network/EchoServer.java b/clients/src/test/java/org/apache/kafka/common/network/EchoServer.java new file mode 100644 index 0000000..47dda69 --- /dev/null +++ b/clients/src/test/java/org/apache/kafka/common/network/EchoServer.java @@ -0,0 +1,108 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE + * file distributed with this work for additional information regarding copyright ownership. The ASF licenses this file + * to You under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the + * License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on + * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the + * specific language governing permissions and limitations under the License. + */ +package org.apache.kafka.common.network; + +import org.apache.kafka.common.config.SecurityConfig; +import org.apache.kafka.common.protocol.SecurityProtocol; + +import java.io.DataInputStream; +import java.io.DataOutputStream; +import java.io.IOException; +import java.net.ServerSocket; +import java.net.Socket; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import java.util.concurrent.atomic.AtomicBoolean; + + +/** + * A simple server that takes size delimited byte arrays and just echos them back to the sender. + */ +class EchoServer extends Thread { + public final int port; + private final ServerSocket serverSocket; + private final List threads; + private final List sockets; + private SecurityProtocol protocol; + private SSLFactory sslFactory; + private final AtomicBoolean startHandshake = new AtomicBoolean(); + + public EchoServer(SecurityConfig securityConfig) throws Exception { + this.protocol = SecurityProtocol.valueOf(securityConfig.getString(SecurityConfig.SECURITY_PROTOCOL_CONFIG)); + if (protocol == SecurityProtocol.SSL) { + this.sslFactory = new SSLFactory(SSLFactory.Mode.SERVER); + this.sslFactory.init(securityConfig); + this.serverSocket = sslFactory.createSSLServerSocketFactory().createServerSocket(0); + this.startHandshake.set(true); + } else { + this.serverSocket = new ServerSocket(0); + } + this.port = this.serverSocket.getLocalPort(); + this.threads = Collections.synchronizedList(new ArrayList()); + this.sockets = Collections.synchronizedList(new ArrayList()); + } + + + @Override + public void run() { + try { + while (true) { + final Socket socket = serverSocket.accept(); + sockets.add(socket); + Thread thread = new Thread() { + @Override + public void run() { + try { + DataInputStream input = new DataInputStream(socket.getInputStream()); + DataOutputStream output = new DataOutputStream(socket.getOutputStream()); + while (socket.isConnected() && !socket.isClosed()) { + int size = input.readInt(); + byte[] bytes = new byte[size]; + input.readFully(bytes); + output.writeInt(size); + output.write(bytes); + output.flush(); + } + } catch (IOException e) { + // ignore + } finally { + try { + socket.close(); + } catch (IOException e) { + // ignore + } + } + } + }; + thread.start(); + threads.add(thread); + } + } catch (IOException e) { + // ignore + } + } + + public void closeConnections() throws IOException { + for (Socket socket : sockets) + socket.close(); + } + + public void close() throws IOException, InterruptedException { + this.serverSocket.close(); + closeConnections(); + for (Thread t : threads) + t.join(); + join(); + } +} \ No newline at end of file diff --git a/clients/src/test/java/org/apache/kafka/common/network/SSLSelectorTest.java b/clients/src/test/java/org/apache/kafka/common/network/SSLSelectorTest.java new file mode 100644 index 0000000..f3c6153 --- /dev/null +++ b/clients/src/test/java/org/apache/kafka/common/network/SSLSelectorTest.java @@ -0,0 +1,97 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE + * file distributed with this work for additional information regarding copyright ownership. The ASF licenses this file + * to You under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the + * License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on + * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + */ +package org.apache.kafka.common.network; + +import static org.junit.Assert.assertEquals; + +import java.io.IOException; +import java.net.InetSocketAddress; +import java.nio.ByteBuffer; +import java.util.LinkedHashMap; + +import org.apache.kafka.common.config.SecurityConfig; +import org.apache.kafka.common.metrics.Metrics; +import org.apache.kafka.common.utils.MockTime; +import org.apache.kafka.test.TestSSLUtils; +import org.apache.kafka.common.utils.Utils; +import org.apache.kafka.test.TestUtils; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; + +/** + * A set of tests for the selector over ssl. These use a test harness that runs a simple socket server that echos back responses. + */ + +public class SSLSelectorTest { + + private static final int BUFFER_SIZE = 4 * 1024; + + private EchoServer server; + private Selectable selector; + + @Before + public void setup() throws Exception { + SecurityConfig serverSecurityConfig = TestSSLUtils.createSSLConfigFile(SSLFactory.Mode.SERVER, null); + this.server = new EchoServer(serverSecurityConfig); + this.server.start(); + String trustStoreServer = serverSecurityConfig.getString(SecurityConfig.SSL_TRUSTSTORE_LOCATION_CONFIG); + SecurityConfig clientSecurityConfig = TestSSLUtils.createSSLConfigFile(SSLFactory.Mode.CLIENT, trustStoreServer); + this.selector = new Selector(new Metrics(), new MockTime(), "MetricGroup", new LinkedHashMap(), clientSecurityConfig); + } + + @After + public void teardown() throws Exception { + this.selector.close(); + this.server.close(); + } + + + /** + * Validate that we can send and receive a message larger than the receive and send buffer size + */ + @Test + public void testSendLargeRequest() throws Exception { + int node = 0; + blockingConnect(node); + String big = TestUtils.randomString(10 * BUFFER_SIZE); + assertEquals(big, blockingRequest(node, big)); + } + + private String blockingRequest(int node, String s) throws IOException { + selector.send(createSend(node, s)); + selector.poll(1000L); + while (true) { + selector.poll(1000L); + for (NetworkReceive receive : selector.completedReceives()) + if (receive.source() == node) + return asString(receive); + } + } + + private String asString(NetworkReceive receive) { + return new String(Utils.toArray(receive.payload())); + } + + private NetworkSend createSend(int node, String s) { + return new NetworkSend(node, ByteBuffer.wrap(s.getBytes())); + } + + /* connect and wait for the connection to complete */ + private void blockingConnect(int node) throws IOException { + selector.connect(node, new InetSocketAddress("localhost", server.port), BUFFER_SIZE, BUFFER_SIZE); + while (!selector.connected().contains(node)) + selector.poll(10000L); + } + +} \ No newline at end of file diff --git a/clients/src/test/java/org/apache/kafka/common/network/SelectorTest.java b/clients/src/test/java/org/apache/kafka/common/network/SelectorTest.java index d5b306b..e4100d3 100644 --- a/clients/src/test/java/org/apache/kafka/common/network/SelectorTest.java +++ b/clients/src/test/java/org/apache/kafka/common/network/SelectorTest.java @@ -15,18 +15,14 @@ package 
org.apache.kafka.common.network; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertTrue; -import java.io.DataInputStream; -import java.io.DataOutputStream; import java.io.IOException; import java.net.InetSocketAddress; import java.net.ServerSocket; -import java.net.Socket; import java.nio.ByteBuffer; -import java.util.ArrayList; -import java.util.Collections; import java.util.LinkedHashMap; -import java.util.List; +import org.apache.kafka.clients.ClientUtils; +import org.apache.kafka.common.config.SecurityConfig; import org.apache.kafka.common.metrics.Metrics; import org.apache.kafka.common.utils.MockTime; import org.apache.kafka.common.utils.Utils; @@ -47,9 +43,10 @@ public class SelectorTest { @Before public void setup() throws Exception { - this.server = new EchoServer(); + SecurityConfig securityConfig = ClientUtils.parseSecurityConfig(""); + this.server = new EchoServer(securityConfig); this.server.start(); - this.selector = new Selector(new Metrics(), new MockTime() , "MetricGroup", new LinkedHashMap()); + this.selector = new Selector(new Metrics(), new MockTime() , "MetricGroup", new LinkedHashMap(), securityConfig); } @After @@ -264,71 +261,5 @@ public class SelectorTest { return new String(Utils.toArray(receive.payload())); } - /** - * A simple server that takes size delimited byte arrays and just echos them back to the sender. - */ - static class EchoServer extends Thread { - public final int port; - private final ServerSocket serverSocket; - private final List threads; - private final List sockets; - - public EchoServer() throws Exception { - this.serverSocket = new ServerSocket(0); - this.port = this.serverSocket.getLocalPort(); - this.threads = Collections.synchronizedList(new ArrayList()); - this.sockets = Collections.synchronizedList(new ArrayList()); - } - - public void run() { - try { - while (true) { - final Socket socket = serverSocket.accept(); - sockets.add(socket); - Thread thread = new Thread() { - public void run() { - try { - DataInputStream input = new DataInputStream(socket.getInputStream()); - DataOutputStream output = new DataOutputStream(socket.getOutputStream()); - while (socket.isConnected() && !socket.isClosed()) { - int size = input.readInt(); - byte[] bytes = new byte[size]; - input.readFully(bytes); - output.writeInt(size); - output.write(bytes); - output.flush(); - } - } catch (IOException e) { - // ignore - } finally { - try { - socket.close(); - } catch (IOException e) { - // ignore - } - } - } - }; - thread.start(); - threads.add(thread); - } - } catch (IOException e) { - // ignore - } - } - - public void closeConnections() throws IOException { - for (Socket socket : sockets) - socket.close(); - } - - public void close() throws IOException, InterruptedException { - this.serverSocket.close(); - closeConnections(); - for (Thread t : threads) - t.join(); - join(); - } - } } diff --git a/clients/src/test/java/org/apache/kafka/common/utils/UtilsTest.java b/clients/src/test/java/org/apache/kafka/common/utils/UtilsTest.java index 2ebe3c2..bf2b5bd 100755 --- a/clients/src/test/java/org/apache/kafka/common/utils/UtilsTest.java +++ b/clients/src/test/java/org/apache/kafka/common/utils/UtilsTest.java @@ -100,4 +100,6 @@ public class UtilsTest { buffer = ByteBuffer.wrap(myvar).asReadOnlyBuffer(); this.subTest(buffer); } + + } \ No newline at end of file diff --git a/clients/src/test/java/org/apache/kafka/test/TestSSLUtils.java b/clients/src/test/java/org/apache/kafka/test/TestSSLUtils.java new file mode 100644 index 
0000000..c811096 --- /dev/null +++ b/clients/src/test/java/org/apache/kafka/test/TestSSLUtils.java @@ -0,0 +1,208 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.kafka.test; + +import org.apache.kafka.common.config.SecurityConfig; +import org.apache.kafka.common.network.SSLFactory; +import sun.security.x509.AlgorithmId; +import sun.security.x509.CertificateAlgorithmId; +import sun.security.x509.CertificateIssuerName; +import sun.security.x509.CertificateSerialNumber; +import sun.security.x509.CertificateSubjectName; +import sun.security.x509.CertificateValidity; +import sun.security.x509.CertificateVersion; +import sun.security.x509.CertificateX509Key; +import sun.security.x509.X500Name; +import sun.security.x509.X509CertImpl; +import sun.security.x509.X509CertInfo; + +import java.io.File; +import java.io.FileOutputStream; +import java.io.IOException; +import java.math.BigInteger; +import java.security.GeneralSecurityException; +import java.security.Key; +import java.security.KeyPair; +import java.security.KeyPairGenerator; +import java.security.KeyStore; +import java.security.NoSuchAlgorithmException; +import java.security.PrivateKey; +import java.security.SecureRandom; +import java.security.cert.Certificate; +import java.security.cert.X509Certificate; +import java.util.Date; +import java.util.HashMap; +import java.util.Map; +import java.util.Properties; + + +public class TestSSLUtils { + + /** + * Create a self-signed X.509 Certificate. + * From http://bfo.com/blog/2011/03/08/odds_and_ends_creating_a_new_x_509_certificate.html. + * + * @param dn the X.509 Distinguished Name, eg "CN=Test, L=London, C=GB" + * @param pair the KeyPair + * @param days how many days from now the Certificate is valid for + * @param algorithm the signing algorithm, eg "SHA1withRSA" + * @return the self-signed certificate + * @throws IOException thrown if an IO error ocurred. + * @throws GeneralSecurityException thrown if an Security error ocurred. 
+ */ + public static X509Certificate generateCertificate(String dn, KeyPair pair, + int days, String algorithm) throws GeneralSecurityException, IOException { + PrivateKey privkey = pair.getPrivate(); + X509CertInfo info = new X509CertInfo(); + Date from = new Date(); + Date to = new Date(from.getTime() + days * 86400000L); + CertificateValidity interval = new CertificateValidity(from, to); + BigInteger sn = new BigInteger(64, new SecureRandom()); + X500Name owner = new X500Name(dn); + + info.set(X509CertInfo.VALIDITY, interval); + info.set(X509CertInfo.SERIAL_NUMBER, new CertificateSerialNumber(sn)); + info.set(X509CertInfo.SUBJECT, new CertificateSubjectName(owner)); + info.set(X509CertInfo.ISSUER, new CertificateIssuerName(owner)); + info.set(X509CertInfo.KEY, new CertificateX509Key(pair.getPublic())); + info + .set(X509CertInfo.VERSION, new CertificateVersion(CertificateVersion.V3)); + AlgorithmId algo = new AlgorithmId(AlgorithmId.md5WithRSAEncryption_oid); + info.set(X509CertInfo.ALGORITHM_ID, new CertificateAlgorithmId(algo)); + + // Sign the cert to identify the algorithm that's used. + X509CertImpl cert = new X509CertImpl(info); + cert.sign(privkey, algorithm); + + // Update the algorith, and resign. + algo = (AlgorithmId) cert.get(X509CertImpl.SIG_ALG); + info + .set(CertificateAlgorithmId.NAME + "." + CertificateAlgorithmId.ALGORITHM, + algo); + cert = new X509CertImpl(info); + cert.sign(privkey, algorithm); + return cert; + } + + public static KeyPair generateKeyPair(String algorithm) throws NoSuchAlgorithmException { + KeyPairGenerator keyGen = KeyPairGenerator.getInstance(algorithm); + keyGen.initialize(1024); + return keyGen.genKeyPair(); + } + + private static KeyStore createEmptyKeyStore() throws GeneralSecurityException, IOException { + KeyStore ks = KeyStore.getInstance("JKS"); + ks.load(null, null); // initialize + return ks; + } + + private static void saveKeyStore(KeyStore ks, String filename, + String password) throws GeneralSecurityException, IOException { + FileOutputStream out = new FileOutputStream(filename); + try { + ks.store(out, password.toCharArray()); + } finally { + out.close(); + } + } + + public static void createKeyStore(String filename, + String password, String alias, + Key privateKey, Certificate cert) throws GeneralSecurityException, IOException { + KeyStore ks = createEmptyKeyStore(); + ks.setKeyEntry(alias, privateKey, password.toCharArray(), + new Certificate[]{cert}); + saveKeyStore(ks, filename, password); + } + + /** + * Creates a keystore with a single key and saves it to a file. 
+ * + * @param filename String file to save + * @param password String store password to set on keystore + * @param keyPassword String key password to set on key + * @param alias String alias to use for the key + * @param privateKey Key to save in keystore + * @param cert Certificate to use as certificate chain associated to key + * @throws GeneralSecurityException for any error with the security APIs + * @throws IOException if there is an I/O error saving the file + */ + public static void createKeyStore(String filename, + String password, String keyPassword, String alias, + Key privateKey, Certificate cert) throws GeneralSecurityException, IOException { + KeyStore ks = createEmptyKeyStore(); + ks.setKeyEntry(alias, privateKey, keyPassword.toCharArray(), + new Certificate[]{cert}); + saveKeyStore(ks, filename, password); + } + + public static void createTrustStore(String filename, + String password, String alias, + Certificate cert) throws GeneralSecurityException, IOException { + KeyStore ks = createEmptyKeyStore(); + ks.setCertificateEntry(alias, cert); + saveKeyStore(ks, filename, password); + } + + public static void createTrustStore( + String filename, String password, Map certs) throws GeneralSecurityException, IOException { + KeyStore ks = createEmptyKeyStore(); + for (Map.Entry cert : certs.entrySet()) { + ks.setCertificateEntry(cert.getKey(), cert.getValue()); + } + saveKeyStore(ks, filename, password); + } + + public static SecurityConfig createSSLConfigFile(SSLFactory.Mode mode, String trustStoreFileClient) throws IOException, GeneralSecurityException { + Properties securityConfigProps = new Properties(); + Map certs = new HashMap(); + KeyPair keyPair = generateKeyPair("RSA"); + X509Certificate cert = generateCertificate("CN=localhost, O=localhost", keyPair, 30, "SHA1withRSA"); + String password = "test"; + + if (mode == SSLFactory.Mode.SERVER) { + File keyStoreFile = File.createTempFile("keystore", ".jks"); + createKeyStore(keyStoreFile.getPath(), password, password, "localhost", keyPair.getPrivate(), cert); + certs.put("localhost", cert); + securityConfigProps.put(SecurityConfig.SSL_KEYSTORE_LOCATION_CONFIG, keyStoreFile.getPath()); + securityConfigProps.put(SecurityConfig.SSL_KEYSTORE_TYPE_CONFIG, "JKS"); + securityConfigProps.put(SecurityConfig.SSL_KEYMANAGER_ALGORITHM_CONFIG, "SunX509"); + securityConfigProps.put(SecurityConfig.SSL_KEYSTORE_PASSWORD_CONFIG, password); + securityConfigProps.put(SecurityConfig.SSL_KEY_PASSWORD_CONFIG, password); + + File trustStoreFile = File.createTempFile("truststore", ".jks"); + createTrustStore(trustStoreFile.getPath(), password, certs); + + securityConfigProps.put(SecurityConfig.SECURITY_PROTOCOL_CONFIG, "SSL"); + securityConfigProps.put(SecurityConfig.SSL_CLIENT_REQUIRE_CERT_CONFIG, "false"); + securityConfigProps.put(SecurityConfig.SSL_TRUSTSTORE_LOCATION_CONFIG, trustStoreFile.getPath()); + securityConfigProps.put(SecurityConfig.SSL_TRUSTSTORE_PASSWORD_CONFIG, password); + securityConfigProps.put(SecurityConfig.SSL_TRUSTSTORE_TYPE_CONFIG, "JKS"); + } else { + securityConfigProps.put(SecurityConfig.SECURITY_PROTOCOL_CONFIG, "SSL"); + securityConfigProps.put(SecurityConfig.SSL_CLIENT_REQUIRE_CERT_CONFIG, "false"); + securityConfigProps.put(SecurityConfig.SSL_TRUSTSTORE_LOCATION_CONFIG, trustStoreFileClient); + securityConfigProps.put(SecurityConfig.SSL_TRUSTSTORE_PASSWORD_CONFIG, password); + securityConfigProps.put(SecurityConfig.SSL_TRUSTSTORE_TYPE_CONFIG, "JKS"); + } + + 
securityConfigProps.put(SecurityConfig.SSL_TRUSTMANAGER_ALGORITHM_CONFIG, "SunX509"); + return new SecurityConfig(securityConfigProps); + } + +} -- 2.4.6 From 754a121e7582f1452a9ae3a3ab72c58cf284da1d Mon Sep 17 00:00:00 2001 From: Sriharsha Chintalapani Date: Sun, 10 May 2015 23:02:01 -0700 Subject: [PATCH 02/30] KAFKA-1690. new java producer needs ssl support as a client. --- build.gradle | 13 +- checkstyle/import-control.xml | 39 ++-- .../java/org/apache/kafka/clients/ClientUtils.java | 11 +- .../apache/kafka/clients/CommonClientConfigs.java | 67 ++++++- .../kafka/clients/consumer/ConsumerConfig.java | 28 ++- .../kafka/clients/consumer/KafkaConsumer.java | 99 +++++----- .../kafka/clients/producer/KafkaProducer.java | 61 +++--- .../kafka/clients/producer/ProducerConfig.java | 25 ++- .../apache/kafka/common/config/AbstractConfig.java | 12 +- .../apache/kafka/common/config/SecurityConfig.java | 113 ----------- .../apache/kafka/common/network/Authenticator.java | 2 +- .../org/apache/kafka/common/network/Channel.java | 2 +- .../kafka/common/network/DefaultAuthenticator.java | 2 +- .../common/network/PlainTextTransportLayer.java | 3 +- .../apache/kafka/common/network/SSLFactory.java | 55 +++--- .../kafka/common/network/SSLTransportLayer.java | 217 +++++++++++---------- .../org/apache/kafka/common/network/Selector.java | 33 ++-- .../kafka/common/network/TransportLayer.java | 2 +- .../apache/kafka/common/network/EchoServer.java | 18 +- .../kafka/common/network/SSLSelectorTest.java | 72 ++++++- .../apache/kafka/common/network/SelectorTest.java | 14 +- .../java/org/apache/kafka/test/TestSSLUtils.java | 157 ++++++++------- 22 files changed, 551 insertions(+), 494 deletions(-) delete mode 100644 clients/src/main/java/org/apache/kafka/common/config/SecurityConfig.java diff --git a/build.gradle b/build.gradle index cd2aa83..4e83d7d 100644 --- a/build.gradle +++ b/build.gradle @@ -4,9 +4,9 @@ // The ASF licenses this file to You under the Apache License, Version 2.0 // (the "License"); you may not use this file except in compliance with // the License. You may obtain a copy of the License at -// +// // http://www.apache.org/licenses/LICENSE-2.0 -// +// // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
@@ -109,7 +109,7 @@ subprojects { archives srcJar archives javadocJar } - + plugins.withType(ScalaPlugin) { //source jar should also contain scala source: srcJar.from sourceSets.main.scala @@ -250,9 +250,9 @@ project(':core') { into "$buildDir/dependant-libs-${scalaVersion}" } - tasks.create(name: "releaseTarGz", dependsOn: configurations.archives.artifacts, type: Tar) { + tasks.create(name: "releaseTarGz", dependsOn: configurations.archives.artifacts, type: Tar) { into "kafka_${baseScalaVersion}-${version}" - compression = Compression.GZIP + compression = Compression.GZIP from(project.file("../bin")) { into "bin/" } from(project.file("../config")) { into "config/" } from '../LICENSE' @@ -354,6 +354,7 @@ project(':clients') { compile "org.slf4j:slf4j-api:1.7.6" compile 'org.xerial.snappy:snappy-java:1.1.1.6' compile 'net.jpountz.lz4:lz4:1.2.0' + compile 'org.bouncycastle:bcprov-jdk16:1.46' testCompile 'com.novocode:junit-interface:0.9' testRuntime "$slf4jlog4j" @@ -382,7 +383,7 @@ project(':clients') { artifacts { archives testJar } - + checkstyle { configFile = new File(rootDir, "checkstyle/checkstyle.xml") } diff --git a/checkstyle/import-control.xml b/checkstyle/import-control.xml index e649189..339c620 100644 --- a/checkstyle/import-control.xml +++ b/checkstyle/import-control.xml @@ -8,19 +8,19 @@ // The ASF licenses this file to You under the Apache License, Version 2.0 // (the "License"); you may not use this file except in compliance with // the License. You may obtain a copy of the License at -// +// // http://www.apache.org/licenses/LICENSE-2.0 -// +// // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. ---> +--> - + - + @@ -28,11 +28,11 @@ - - + + - + @@ -43,7 +43,7 @@ - + @@ -51,35 +51,35 @@ - + - + - + - + - + - + @@ -92,15 +92,15 @@ - + - + - + @@ -109,6 +109,7 @@ + - + diff --git a/clients/src/main/java/org/apache/kafka/clients/ClientUtils.java b/clients/src/main/java/org/apache/kafka/clients/ClientUtils.java index 54a554f..3657279 100644 --- a/clients/src/main/java/org/apache/kafka/clients/ClientUtils.java +++ b/clients/src/main/java/org/apache/kafka/clients/ClientUtils.java @@ -3,9 +3,9 @@ * file distributed with this work for additional information regarding copyright ownership. The ASF licenses this file * to You under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the * License. You may obtain a copy of the License at - * + * * http://www.apache.org/licenses/LICENSE-2.0 - * + * * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the * specific language governing permissions and limitations under the License. 
@@ -69,10 +69,9 @@ public class ClientUtils { public static SecurityConfig parseSecurityConfig(String securityConfigFile) throws IOException { Properties securityProps = new Properties(); - if (securityConfigFile == null || securityConfigFile == "") { - return new SecurityConfig(securityProps); + if (securityConfigFile != null && securityConfigFile != "") { + securityProps = loadProps(securityConfigFile); } - securityProps = loadProps(securityConfigFile); return new SecurityConfig(securityProps); } -} \ No newline at end of file +} diff --git a/clients/src/main/java/org/apache/kafka/clients/CommonClientConfigs.java b/clients/src/main/java/org/apache/kafka/clients/CommonClientConfigs.java index 0b23875..ead3826 100644 --- a/clients/src/main/java/org/apache/kafka/clients/CommonClientConfigs.java +++ b/clients/src/main/java/org/apache/kafka/clients/CommonClientConfigs.java @@ -3,9 +3,9 @@ * file distributed with this work for additional information regarding copyright ownership. The ASF licenses this file * to You under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the * License. You may obtain a copy of the License at - * + * * http://www.apache.org/licenses/LICENSE-2.0 - * + * * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the * specific language governing permissions and limitations under the License. @@ -17,7 +17,7 @@ package org.apache.kafka.clients; * Some configurations shared by both producer and consumer */ public class CommonClientConfigs { - + /* * NOTE: DO NOT CHANGE EITHER CONFIG NAMES AS THESE ARE PART OF THE PUBLIC API AND CHANGE WILL BREAK USER CODE. */ @@ -27,10 +27,10 @@ public class CommonClientConfigs { + "host1:port1,host2:port2,.... Since these servers are just used for the initial connection to " + "discover the full cluster membership (which may change dynamically), this list need not contain the full set of " + "servers (you may want more than one, though, in case a server is down)."; - + public static final String METADATA_MAX_AGE_CONFIG = "metadata.max.age.ms"; public static final String METADATA_MAX_AGE_DOC = "The period of time in milliseconds after which we force a refresh of metadata even if we haven't seen any partition leadership changes to proactively discover any new brokers or partitions."; - + public static final String SEND_BUFFER_CONFIG = "send.buffer.bytes"; public static final String SEND_BUFFER_DOC = "The size of the TCP send buffer (SO_SNDBUF) to use when sending data."; @@ -45,7 +45,7 @@ public class CommonClientConfigs { public static final String RETRY_BACKOFF_MS_CONFIG = "retry.backoff.ms"; public static final String RETRY_BACKOFF_MS_DOC = "The amount of time to wait before attempting to retry a failed fetch request to a given topic partition. 
This avoids repeated fetching-and-failing in a tight loop."; - + public static final String METRICS_SAMPLE_WINDOW_MS_CONFIG = "metrics.sample.window.ms"; public static final String METRICS_SAMPLE_WINDOW_MS_DOC = "The number of samples maintained to compute metrics."; @@ -57,4 +57,57 @@ public class CommonClientConfigs { public static final String SECURITY_CONFIG_FILE_CONFIG = "security.config.file"; public static final String SECURITY_CONFIG_FILE_DOC = "Kafka client security related config file."; -} \ No newline at end of file + + public static final String SECURITY_PROTOCOL_CONFIG = "security.protocol"; + public static final String SECURITY_PROTOCOL_DOC = "Protocol used to communicate with brokers. Currently only PLAINTEXT and SSL are supported."; + + public static final String SSL_PROTOCOL_CONFIG = "ssl.protocol"; + public static final String SSL_PROTOCOL_DOC = "The ssl protocol used to generate SSLContext." + + "Default setting is TLS. Allowed values are SSL, SSLv2, SSLv3, TLS, TLSv1.1, TLSv1.2"; + + public static final String SSL_CIPHER_SUITES_CONFIG = "ssl.cipher.suites"; + public static final String SSL_CIPHER_SUITES_DOC = "A cipher suite is a named combination of authentication, encryption, MAC and key exchange algorithm used to negotiate the security settings for a network connection using TLS or SSL network protocol." + + "By default all the available cipher suites are supported."; + + public static final String SSL_ENABLED_PROTOCOLS_CONFIG = "ssl.enabled.protocols"; + public static final String SSL_ENABLED_PROTOCOLS_DOC = "The list of protocols enabled for SSL connections. " + + "All versions of TLS is enabled by default."; + + public static final String SSL_KEYSTORE_TYPE_CONFIG = "ssl.keystore.type"; + public static final String SSL_KEYSTORE_TYPE_DOC = "The file format of the key store file. " + + "This is optional for client. Default value is JKS"; + + public static final String SSL_KEYSTORE_LOCATION_CONFIG = "ssl.keystore.location"; + public static final String SSL_KEYSTORE_LOCATION_DOC = "The location of the key store file. " + + "This is optional for Client and can be used for two-way authentication for client."; + + public static final String SSL_KEYSTORE_PASSWORD_CONFIG = "ssl.keystore.password"; + public static final String SSL_KEYSTORE_PASSWORD_DOC = "The store password for the key store file. "; + + public static final String SSL_KEY_PASSWORD_CONFIG = "ssl.key.password"; + public static final String SSL_KEY_PASSWORD_DOC = "The password of the private key in the key store file. " + + "This is optional for client."; + + public static final String SSL_TRUSTSTORE_TYPE_CONFIG = "ssl.truststore.type"; + public static final String SSL_TRUSTSTORE_TYPE_DOC = "The file format of the trust store file. " + + "Default value is JKS."; + + public static final String SSL_TRUSTSTORE_LOCATION_CONFIG = "ssl.truststore.location"; + public static final String SSL_TRUSTSTORE_LOCATION_DOC = "The location of the trust store file. "; + + public static final String SSL_TRUSTSTORE_PASSWORD_CONFIG = "ssl.truststore.password"; + public static final String SSL_TRUSTSTORE_PASSWORD_DOC = "The password for the trust store file. "; + + public static final String SSL_CLIENT_REQUIRE_CERT_CONFIG = "ssl.client.require.cert"; + public static final String SSL_CLIENT_REQUIRE_CERT_DOC = "This is to enforce two-way authentication between client and server." + + "Default value is false. 
If set to true the client needs to provide keystore related configs."; + + public static final String SSL_KEYMANAGER_ALGORITHM_CONFIG = "ssl.keymanager.algorithm"; + public static final String SSL_KEYMANAGER_ALGORITHM_DOC = "The algorithm used by key manager factory for SSL connections. " + + "Default value is the key manager factory algorithm configured for the Java Virtual Machine."; + + public static final String SSL_TRUSTMANAGER_ALGORITHM_CONFIG = "ssl.trustmanager.algorithm"; + public static final String SSL_TRUSTMANAGER_ALGORITHM_DOC = "The algorithm used by trust manager factory for SSL connections. " + + "Default value is the trust manager factory algorithm configured for the Java Virtual Machine."; + +} diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/ConsumerConfig.java b/clients/src/main/java/org/apache/kafka/clients/consumer/ConsumerConfig.java index 190fe63..4ccd423 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/ConsumerConfig.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/ConsumerConfig.java @@ -3,9 +3,9 @@ * file distributed with this work for additional information regarding copyright ownership. The ASF licenses this file * to You under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the * License. You may obtain a copy of the License at - * + * * http://www.apache.org/licenses/LICENSE-2.0 - * + * * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the * specific language governing permissions and limitations under the License. @@ -19,6 +19,7 @@ import org.apache.kafka.common.config.ConfigDef; import org.apache.kafka.common.config.ConfigDef.Importance; import org.apache.kafka.common.config.ConfigDef.Type; import org.apache.kafka.common.serialization.Deserializer; +import org.apache.kafka.common.protocol.SecurityProtocol; import java.util.HashMap; import java.util.Map; @@ -147,7 +148,7 @@ public class ConsumerConfig extends AbstractConfig { */ public static final String CHECK_CRCS_CONFIG = "check.crcs"; private static final String CHECK_CRCS_DOC = "Automatically check the CRC32 of the records consumed. This ensures no on-the-wire or on-disk corruption to the messages occurred.
This check adds some overhead, so it may be disabled in cases seeking extreme performance."; - + /** key.deserializer */ public static final String KEY_DESERIALIZER_CLASS_CONFIG = "key.deserializer"; private static final String KEY_DESERIALIZER_CLASS_DOC = "Deserializer class for key that implements the Deserializer interface."; @@ -256,7 +257,7 @@ public class ConsumerConfig extends AbstractConfig { Type.BOOLEAN, true, Importance.LOW, - CHECK_CRCS_DOC) + CHECK_CRCS_DOC) .define(METRICS_SAMPLE_WINDOW_MS_CONFIG, Type.LONG, 30000, @@ -282,11 +283,20 @@ public class ConsumerConfig extends AbstractConfig { Type.CLASS, Importance.HIGH, VALUE_DESERIALIZER_CLASS_DOC) - .define(SECURITY_CONFIG_FILE_CONFIG, - Type.STRING, - "", - Importance.MEDIUM, - SECURITY_CONFIG_FILE_DOC); + .define(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG, Type.STRING, SecurityProtocol.PLAINTEXT.toString(), Importance.MEDIUM, CommonClientConfigs.SECURITY_PROTOCOL_DOC) + .define(CommonClientConfigs.SSL_PROTOCOL_CONFIG, Type.STRING, "TLS", Importance.MEDIUM, CommonClientConfigs.SSL_PROTOCOL_DOC) + .define(CommonClientConfigs.SSL_CIPHER_SUITES_CONFIG, Type.LIST, "", Importance.LOW, CommonClientConfigs.SSL_CIPHER_SUITES_DOC) + .define(CommonClientConfigs.SSL_ENABLED_PROTOCOLS_CONFIG, Type.LIST, "TLSv1.2, TLSv1.1, TLSv1", Importance.MEDIUM, CommonClientConfigs.SSL_ENABLED_PROTOCOLS_DOC) + .define(CommonClientConfigs.SSL_KEYSTORE_TYPE_CONFIG, Type.STRING, "JKS", Importance.MEDIUM, CommonClientConfigs.SSL_KEYSTORE_TYPE_DOC) + .define(CommonClientConfigs.SSL_KEYSTORE_LOCATION_CONFIG, Type.STRING, Importance.HIGH, CommonClientConfigs.SSL_KEYSTORE_LOCATION_DOC, false) + .define(CommonClientConfigs.SSL_KEYSTORE_PASSWORD_CONFIG, Type.STRING, Importance.HIGH, CommonClientConfigs.SSL_KEYSTORE_PASSWORD_DOC, false) + .define(CommonClientConfigs.SSL_KEY_PASSWORD_CONFIG, Type.STRING, Importance.HIGH, CommonClientConfigs.SSL_KEY_PASSWORD_DOC, false) + .define(CommonClientConfigs.SSL_TRUSTSTORE_TYPE_CONFIG, Type.STRING, "JKS", Importance.MEDIUM, CommonClientConfigs.SSL_TRUSTSTORE_TYPE_DOC) + .define(CommonClientConfigs.SSL_TRUSTSTORE_LOCATION_CONFIG, Type.STRING, Importance.HIGH, CommonClientConfigs.SSL_TRUSTSTORE_LOCATION_DOC, false) + .define(CommonClientConfigs.SSL_TRUSTSTORE_PASSWORD_CONFIG, Type.STRING, Importance.HIGH, CommonClientConfigs.SSL_TRUSTSTORE_PASSWORD_DOC, false) + .define(CommonClientConfigs.SSL_KEYMANAGER_ALGORITHM_CONFIG, Type.STRING, "SunX509", Importance.LOW, CommonClientConfigs.SSL_KEYMANAGER_ALGORITHM_DOC) + .define(CommonClientConfigs.SSL_TRUSTMANAGER_ALGORITHM_CONFIG, Type.STRING, "SunX509", Importance.LOW, CommonClientConfigs.SSL_TRUSTMANAGER_ALGORITHM_DOC) + .define(CommonClientConfigs.SSL_CLIENT_REQUIRE_CERT_CONFIG, Type.BOOLEAN, false, Importance.MEDIUM, CommonClientConfigs.SSL_CLIENT_REQUIRE_CERT_DOC); } public static Map addDeserializerToConfig(Map configs, diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/KafkaConsumer.java b/clients/src/main/java/org/apache/kafka/clients/consumer/KafkaConsumer.java index 032fd4b..375669f 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/KafkaConsumer.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/KafkaConsumer.java @@ -3,9 +3,9 @@ * file distributed with this work for additional information regarding copyright ownership. The ASF licenses this file * to You under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the * License. 
You may obtain a copy of the License at - * + * * http://www.apache.org/licenses/LICENSE-2.0 - * + * * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the * specific language governing permissions and limitations under the License. @@ -37,7 +37,6 @@ import org.apache.kafka.common.KafkaException; import org.apache.kafka.common.Metric; import org.apache.kafka.common.PartitionInfo; import org.apache.kafka.common.TopicPartition; -import org.apache.kafka.common.config.SecurityConfig; import org.apache.kafka.common.metrics.JmxReporter; import org.apache.kafka.common.metrics.MetricConfig; import org.apache.kafka.common.MetricName; @@ -63,7 +62,7 @@ import org.slf4j.LoggerFactory; *

* The consumer is thread safe but generally will be used only from within a single thread. The consumer client has no * threads of its own; all work is done in the caller's thread when calls are made on the various methods exposed. - * + * *

Offsets and Consumer Position

* Kafka maintains a numerical offset for each record in a partition. This offset acts as a kind of unique identifier of * a record within that partition, and also denotes the position of the consumer in the partition. That is, a consumer @@ -81,9 +80,9 @@ import org.slf4j.LoggerFactory; *

* This distinction gives the consumer control over when a record is considered consumed. It is discussed in further * detail below. - * + * *

Consumer Groups

- * + * * Kafka uses the concept of consumer groups to allow a pool of processes to divide up the work of consuming and * processing records. These processes can either be running on the same machine or, as is more likely, they can be * distributed over many machines to provide additional scalability and fault tolerance for processing. @@ -112,14 +111,14 @@ import org.slf4j.LoggerFactory; *

* It is also possible for the consumer to manually specify the partitions it subscribes to, which disables this dynamic * partition balancing. - * + * *

Usage Examples

* The consumer APIs offer flexibility to cover a variety of consumption use cases. Here are some examples to * demonstrate how to use them. - * + * *

Simple Processing

* This example demonstrates the simplest usage of Kafka's consumer api. - * + * *
  *     Properties props = new Properties();
  *     props.put("bootstrap.servers", "localhost:9092");
@@ -137,7 +136,7 @@ import org.slf4j.LoggerFactory;
  *             System.out.printf("offset = %d, key = %s, value = %s", record.offset(), record.key(), record.value());
  *     }
  * 
- * + * * Setting enable.auto.commit means that offsets are committed automatically with a frequency controlled by * the config auto.commit.interval.ms. *
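As a concrete illustration of the two settings just described, continuing the Properties example above (the interval value is a placeholder):

    props.put("enable.auto.commit", "true");
    props.put("auto.commit.interval.ms", "1000");   // commit frequency in milliseconds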

@@ -157,9 +156,9 @@ import org.slf4j.LoggerFactory; *

* The serializers settings specify how to turn the objects the user provides into bytes. By specifying the string * serializers we are saying that our record's key and value will just be simple strings. - * + * *
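Since this patch adds SSL support to the clients, it may help to sketch how a consumer could opt into SSL using the new configuration keys introduced in CommonClientConfigs; this is illustrative only, and the truststore path and password are placeholders rather than values taken from the patch:

    props.put("security.protocol", "SSL");
    props.put("ssl.truststore.location", "/path/to/client.truststore.jks");   // placeholder path
    props.put("ssl.truststore.password", "changeit");                         // placeholder password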

Controlling When Messages Are Considered Consumed

- * + * * In this example we will consume a batch of records and batch them up in memory, when we have sufficient records * batched we will insert them into a database. If we allowed offsets to auto commit as in the previous example messages * would be considered consumed after they were given out by the consumer, and it would be possible that our process @@ -171,7 +170,7 @@ import org.slf4j.LoggerFactory; * would consume from last committed offset and would repeat the insert of the last batch of data. Used in this way * Kafka provides what is often called "at-least once delivery" guarantees, as each message will likely be delivered one * time but in failure cases could be duplicated. - * + * *
  *     Properties props = new Properties();
  *     props.put("bootstrap.servers", "localhost:9092");
@@ -197,9 +196,9 @@ import org.slf4j.LoggerFactory;
  *         }
  *     }
  * 
- * + * *

Subscribing To Specific Partitions

- * + * * In the previous examples we subscribed to the topics we were interested in and let Kafka give our particular process * a fair share of the partitions for those topics. This provides a simple load balancing mechanism so multiple * instances of our program can divide up the work of processing records. @@ -219,7 +218,7 @@ import org.slf4j.LoggerFactory; *

* This mode is easy to specify, rather than subscribing to the topic, the consumer just subscribes to particular * partitions: - * + * *

  *     String topic = "foo";
  *     TopicPartition partition0 = new TopicPartition(topic, 0);
@@ -227,15 +226,15 @@ import org.slf4j.LoggerFactory;
  *     consumer.subscribe(partition0);
  *     consumer.subscribe(partition1);
  * 
- * + * * The group that the consumer specifies is still used for committing offsets, but now the set of partitions will only * be changed if the consumer specifies new partitions, and no attempt at failure detection will be made. *

* It isn't possible to mix both subscription to specific partitions (with no load balancing) and to topics (with load * balancing) using the same consumer instance. - * + * *

Managing Your Own Offsets

- * + * * The consumer application need not use Kafka's built-in offset storage, it can store offsets in a store of it's own * choosing. The primary use case for this is allowing the application to store both the offset and the results of the * consumption in the same system in a way that both the results and offsets are stored atomically. This is not always @@ -255,14 +254,14 @@ import org.slf4j.LoggerFactory; * This means that in this case the indexing process that comes back having lost recent updates just resumes indexing * from what it has ensuring that no updates are lost. * - * + * * Each record comes with it's own offset, so to manage your own offset you just need to do the following: *
    *
  *   1. Configure enable.auto.commit=false
  *   2. Use the offset provided with each {@link ConsumerRecord} to save your position.
  *   3. On restart restore the position of the consumer using {@link #seek(TopicPartition, long)}.
  *
- * 
+ *
  * This type of usage is simplest when the partition assignment is also done manually (this would be likely in the
  * search index use case described above). If the partition assignment is done automatically special care will also be
  * needed to handle the case where partition assignments change. This can be handled using a special callback specified
@@ -275,9 +274,9 @@ import org.slf4j.LoggerFactory;
  *

  * Another common use for {@link ConsumerRebalanceCallback} is to flush any caches the application maintains for
  * partitions that are moved elsewhere.
- * 
+ *
  *
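As a rough sketch of the three numbered steps above (illustrative only; readLastOffset and saveResultAndOffset are hypothetical application-side helpers that read and write the external store atomically):

    // Step 1 happens in the consumer configuration: props.put("enable.auto.commit", "false");
    TopicPartition partition0 = new TopicPartition("foo", 0);
    consumer.subscribe(partition0);
    // Step 3: on restart, resume from the position kept in the application's own store
    consumer.seek(partition0, readLastOffset(partition0));
    // Step 2: while processing, persist each result together with record.offset()
    // in a single atomic write, e.g. saveResultAndOffset(record.value(), record.offset());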

Controlling The Consumer's Position

- * 
+ *
  * In most use cases the consumer will simply consume records from beginning to end, periodically committing it's
  * position (either automatically or manually). However Kafka allows the consumer to manually control it's position,
  * moving forward or backwards in a partition at will. This means a consumer can re-consume older records, or skip to
@@ -292,20 +291,20 @@ import org.slf4j.LoggerFactory;
  * the consumer will want to initialize it's position on start-up to whatever is contained in the local store. Likewise
  * if the local state is destroyed (say because the disk is lost) the state may be recreated on a new machine by
  * reconsuming all the data and recreating the state (assuming that Kafka is retaining sufficient history).
- * 
+ *
  * Kafka allows specifying the position using {@link #seek(TopicPartition, long)} to specify the new position. Special
  * methods for seeking to the earliest and latest offset the server maintains are also available (
  * {@link #seekToBeginning(TopicPartition...)} and {@link #seekToEnd(TopicPartition...)} respectively).
- * 
+ *
  *
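For example (illustrative only; localStoreOffset stands in for whatever offset the application recovered from its local store), a consumer that rebuilds local state might reposition itself on start-up like this:

    TopicPartition partition0 = new TopicPartition("foo", 0);
    consumer.subscribe(partition0);
    // resume from an absolute offset recovered from the local store
    consumer.seek(partition0, localStoreOffset);
    // or rebuild everything from the earliest retained record, or skip to the latest
    consumer.seekToBeginning(partition0);
    consumer.seekToEnd(partition0);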

Multithreaded Processing

- * 
+ *
  * The Kafka consumer is threadsafe but coarsely synchronized. All network I/O happens in the thread of the application
  * making the call. We have intentionally avoided implementing a particular threading model for processing.
  *

  * This leaves several options for implementing multi-threaded processing of records.
- * 
+ *
  *

1. One Consumer Per Thread

- * 
+ *
  * A simple option is to give each thread it's own consumer instance. Here are the pros and cons of this approach:
  *
    *
  • PRO: It is the easiest to implement
@@ -318,13 +317,13 @@ import org.slf4j.LoggerFactory;
  * which can cause some drop in I/O throughput.
  • CON: The number of total threads across all processes will be limited by the total number of partitions.
  *
- * 
+ *
  *
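A minimal sketch of this one-consumer-per-thread layout (illustrative only; props, numThreads, and the per-thread record-processing loop are assumed to be supplied by the application):

    for (int i = 0; i < numThreads; i++) {
        new Thread(new Runnable() {
            public void run() {
                KafkaConsumer consumer = new KafkaConsumer(props);  // one instance per thread, never shared
                consumer.subscribe("my-topic");
                // poll and process records entirely within this thread
            }
        }).start();
    }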

2. Decouple Consumption and Processing

- * 
+ *
  * Another alternative is to have one or more consumer threads that do all data consumption and hands off
  * {@link ConsumerRecords} instances to a blocking queue consumed by a pool of processor threads that actually handle
  * the record processing.
- * 
+ *
  * This option likewise has pros and cons:
  *
    *
  • PRO: This option allows independently scaling the number of consumers and processors. This makes it
@@ -335,11 +334,11 @@ import org.slf4j.LoggerFactory;
  *
  • CON: Manually committing the position becomes harder as it requires that all threads co-ordinate to ensure
  * that processing is complete for that partition.
  *
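To picture the hand-off, a rough sketch (the queued element type, the running flag, and the process(...) call are assumptions for illustration, not APIs from this patch):

    BlockingQueue<ConsumerRecords> queue = new ArrayBlockingQueue<ConsumerRecords>(1000);

    // consumer thread: the only thread that touches the consumer instance
    try {
        while (running) {
            queue.put(consumer.poll(100));   // hand each polled batch to the worker pool
        }
    } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
    }

    // each processor thread in the pool runs the mirror image:
    try {
        while (running) {
            process(queue.take());           // process(...) is an application-supplied method
        }
    } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
    }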
- * 
+ *
  * There are many possible variations on this approach. For example each processor thread can have it's own queue, and
  * the consumer threads can hash into these queues using the TopicPartition to ensure in-order consumption and simplify
  * commit.
- * 
+ *
  */
 public class KafkaConsumer implements Consumer {
@@ -362,7 +361,6 @@ public class KafkaConsumer implements Consumer {
     private final boolean autoCommit;
     private final long autoCommitIntervalMs;
     private final ConsumerRebalanceCallback rebalanceCallback;
-    private final SecurityConfig securityConfig;
     private long lastCommitAttemptMs;
     private boolean closed = false;
@@ -373,7 +371,7 @@ public class KafkaConsumer implements Consumer {
      * string "42" or the integer 42).
  *

  * Valid configuration strings are documented at {@link ConsumerConfig}
- * 
+ *
  * @param configs The consumer configs
  */
 public KafkaConsumer(Map configs) {
@@ -385,7 +383,7 @@ public class KafkaConsumer implements Consumer {
  * {@link ConsumerRebalanceCallback} implementation, a key and a value {@link Deserializer}.
  *

  * Valid configuration strings are documented at {@link ConsumerConfig}
- * 
+ *
  * @param configs The consumer configs
  * @param callback A callback interface that the user can implement to manage customized offsets on the start and
  *        end of every rebalance operation.
@@ -419,7 +417,7 @@ public class KafkaConsumer implements Consumer {
  * {@link ConsumerRebalanceCallback} implementation, a key and a value {@link Deserializer}.
  *

  * Valid configuration strings are documented at {@link ConsumerConfig}
- * 
+ *
  * @param properties The consumer configuration properties
  * @param callback A callback interface that the user can implement to manage customized offsets on the start and
  *        end of every rebalance operation.
@@ -470,12 +468,11 @@ public class KafkaConsumer implements Consumer {
         this.metadata = new Metadata(retryBackoffMs, config.getLong(ConsumerConfig.METADATA_MAX_AGE_CONFIG));
         List addresses = ClientUtils.parseAndValidateAddresses(config.getList(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG));
         this.metadata.update(Cluster.bootstrap(addresses), 0);
-
         String metricGrpPrefix = "consumer";
         Map metricsTags = new LinkedHashMap();
         metricsTags.put("client-id", clientId);
-        this.securityConfig = ClientUtils.parseSecurityConfig(config.getString(ConsumerConfig.SECURITY_CONFIG_FILE_CONFIG));
-        this.client = new NetworkClient(new Selector(metrics, time, metricGrpPrefix, metricsTags, securityConfig),
+
+        this.client = new NetworkClient(new Selector(metrics, time, metricGrpPrefix, metricsTags, config.values()),
                 this.metadata,
                 clientId,
                 100, // a fixed large enough value will suffice
@@ -559,7 +556,7 @@ public class KafkaConsumer implements Consumer {
  *

  • An existing member of the consumer group dies
  • A new member is added to an existing consumer group via the join API
  *
- * 
+ *
  * @param topics A variable list of topics that the consumer wants to subscribe to
  */
 @Override
@@ -576,7 +573,7 @@ public class KafkaConsumer implements Consumer {
  * functionality. As such, there will be no rebalance operation triggered when group membership or cluster and topic
  * metadata change.
  *

    - * + * * @param partitions Partitions to incrementally subscribe to */ @Override @@ -592,7 +589,7 @@ public class KafkaConsumer implements Consumer { /** * Unsubscribe from the specific topics. This will trigger a rebalance operation and records for this topic will not * be returned from the next {@link #poll(long) poll()} onwards - * + * * @param topics Topics to unsubscribe from */ public synchronized void unsubscribe(String... topics) { @@ -606,7 +603,7 @@ public class KafkaConsumer implements Consumer { /** * Unsubscribe from the specific topic partitions. records for these partitions will not be returned from the next * {@link #poll(long) poll()} onwards - * + * * @param partitions Partitions to unsubscribe from */ public synchronized void unsubscribe(TopicPartition... partitions) { @@ -625,11 +622,11 @@ public class KafkaConsumer implements Consumer { * If {@link #seek(TopicPartition, long)} is used, it will use the specified offsets on startup and on every * rebalance, to consume data from that offset sequentially on every poll. If not, it will use the last checkpointed * offset using {@link #commit(Map, CommitType) commit(offsets, sync)} for the subscribed list of partitions. - * + * * @param timeout The time, in milliseconds, spent waiting in poll if data is not available. If 0, waits * indefinitely. Must not be negative * @return map of topic to records since the last fetch for the subscribed list of topics and partitions - * + * * @throws NoOffsetForPartitionException If there is no stored offset for a subscribed partition and no automatic * offset reset policy has been configured. */ @@ -683,7 +680,7 @@ public class KafkaConsumer implements Consumer { * A non-blocking commit will attempt to commit offsets asychronously. No error will be thrown if the commit fails. * A blocking commit will wait for a response acknowledging the commit. In the event of an error it will retry until * the commit succeeds. - * + * * @param offsets The list of offsets per partition that should be committed to Kafka. * @param commitType Control whether the commit is blocking */ @@ -708,7 +705,7 @@ public class KafkaConsumer implements Consumer { * This commits offsets only to Kafka. The offsets committed using this API will be used on the first fetch after * every rebalance and also on startup. As such, if you need to store offsets in anything other than Kafka, this API * should not be used. - * + * * @param commitType Whether or not the commit should block until it is acknowledged. */ @Override @@ -757,7 +754,7 @@ public class KafkaConsumer implements Consumer { /** * Returns the offset of the next record that will be fetched (if a record with that offset exists). - * + * * @param partition The partition to get the position for * @return The offset * @throws NoOffsetForPartitionException If a position hasn't been set for a given partition, and no reset policy is @@ -782,7 +779,7 @@ public class KafkaConsumer implements Consumer { *

    * This call may block to do a remote call if the partition in question isn't assigned to this consumer or if the * consumer hasn't yet initialized it's cache of committed offsets. - * + * * @param partition The partition to check * @return The last committed offset or null if no offset has been committed * @throws NoOffsetForPartitionException If no offset has ever been committed by any process for the given @@ -818,7 +815,7 @@ public class KafkaConsumer implements Consumer { /** * Get metadata about the partitions for a given topic. This method will issue a remote call to the server if it * does not already have any metadata about the given topic. - * + * * @param topic The topic to get partition metadata for * @return The list of partitions */ diff --git a/clients/src/main/java/org/apache/kafka/clients/producer/KafkaProducer.java b/clients/src/main/java/org/apache/kafka/clients/producer/KafkaProducer.java index 2c21dee..1650d85 100644 --- a/clients/src/main/java/org/apache/kafka/clients/producer/KafkaProducer.java +++ b/clients/src/main/java/org/apache/kafka/clients/producer/KafkaProducer.java @@ -3,9 +3,9 @@ * file distributed with this work for additional information regarding copyright ownership. The ASF licenses this file * to You under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the * License. You may obtain a copy of the License at - * + * * http://www.apache.org/licenses/LICENSE-2.0 - * + * * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the * specific language governing permissions and limitations under the License. @@ -32,7 +32,6 @@ import org.apache.kafka.common.Metric; import org.apache.kafka.common.PartitionInfo; import org.apache.kafka.common.TopicPartition; import org.apache.kafka.common.config.ConfigException; -import org.apache.kafka.common.config.SecurityConfig; import org.apache.kafka.common.errors.ApiException; import org.apache.kafka.common.errors.InterruptException; import org.apache.kafka.common.errors.RecordTooLargeException; @@ -74,11 +73,11 @@ import org.slf4j.LoggerFactory; * props.put("buffer.memory", 33554432); * props.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer"); * props.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer"); - * + * * Producer producer = new KafkaProducer(props); * for(int i = 0; i < 100; i++) * producer.send(new ProducerRecord("my-topic", Integer.toString(i), Integer.toString(i))); - * + * * producer.close(); * } *

@@ -93,25 +92,25 @@ import org.slf4j.LoggerFactory;
  * we have specified will result in blocking on the full commit of the record, the slowest but most durable setting.
  *

  * If the request fails, the producer can automatically retry, though since we have specified retries
- * as 0 it won't. Enabling retries also opens up the possibility of duplicates (see the documentation on 
+ * as 0 it won't. Enabling retries also opens up the possibility of duplicates (see the documentation on
  * message delivery semantics for details).
  *

- * The producer maintains buffers of unsent records for each partition. These buffers are of a size specified by 
+ * The producer maintains buffers of unsent records for each partition. These buffers are of a size specified by
  * the batch.size config. Making this larger can result in more batching, but requires more memory (since we will
  * generally have one of these buffers for each active partition).
  *

- * By default a buffer is available to send immediately even if there is additional unused space in the buffer. However if you 
+ * By default a buffer is available to send immediately even if there is additional unused space in the buffer. However if you
  * want to reduce the number of requests you can set linger.ms to something greater than 0. This will
- * instruct the producer to wait up to that number of milliseconds before sending a request in hope that more records will 
- * arrive to fill up the same batch. This is analogous to Nagle's algorithm in TCP. For example, in the code snippet above, 
- * likely all 100 records would be sent in a single request since we set our linger time to 1 millisecond. However this setting 
- * would add 1 millisecond of latency to our request waiting for more records to arrive if we didn't fill up the buffer. Note that 
- * records that arrive close together in time will generally batch together even with linger.ms=0 so under heavy load 
+ * instruct the producer to wait up to that number of milliseconds before sending a request in hope that more records will
+ * arrive to fill up the same batch. This is analogous to Nagle's algorithm in TCP. For example, in the code snippet above,
+ * likely all 100 records would be sent in a single request since we set our linger time to 1 millisecond. However this setting
+ * would add 1 millisecond of latency to our request waiting for more records to arrive if we didn't fill up the buffer. Note that
+ * records that arrive close together in time will generally batch together even with linger.ms=0 so under heavy load
  * batching will occur regardless of the linger configuration; however setting this to something larger than 0 can lead to fewer, more
  * efficient requests when not under maximal load at the cost of a small amount of latency.
  *

  * The buffer.memory controls the total amount of memory available to the producer for buffering. If records
- * are sent faster than they can be transmitted to the server then this buffer space will be exhausted. When the buffer space is 
+ * are sent faster than they can be transmitted to the server then this buffer space will be exhausted. When the buffer space is
  * exhausted additional send calls will block. For uses where you want to avoid any blocking you can set block.on.buffer.full=false which
  * will cause the send call to result in an exception.
  *
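As a small illustration of that last knob (values are examples only), a producer that should fail fast with a BufferExhaustedException instead of blocking when the buffer fills up could be configured as:

    Properties props = new Properties();
    props.put("bootstrap.servers", "localhost:9092");
    props.put("buffer.memory", 33554432);              // total memory available for buffering
    props.put("block.on.buffer.full", "false");        // throw instead of blocking when the buffer is exhausted
    props.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
    props.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");
    Producer producer = new KafkaProducer(props);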

    @@ -139,7 +138,6 @@ public class KafkaProducer implements Producer { private final Serializer keySerializer; private final Serializer valueSerializer; private final ProducerConfig producerConfig; - private final SecurityConfig securityConfig; /** * A producer is instantiated by providing a set of key-value pairs as configuration. Valid configuration strings @@ -216,7 +214,6 @@ public class KafkaProducer implements Producer { this.maxRequestSize = config.getInt(ProducerConfig.MAX_REQUEST_SIZE_CONFIG); this.totalMemorySize = config.getLong(ProducerConfig.BUFFER_MEMORY_CONFIG); this.compressionType = CompressionType.forName(config.getString(ProducerConfig.COMPRESSION_TYPE_CONFIG)); - this.securityConfig = ClientUtils.parseSecurityConfig(config.getString(ProducerConfig.SECURITY_CONFIG_FILE_CONFIG)); Map metricTags = new LinkedHashMap(); metricTags.put("client-id", clientId); this.accumulator = new RecordAccumulator(config.getInt(ProducerConfig.BATCH_SIZE_CONFIG), @@ -231,7 +228,7 @@ public class KafkaProducer implements Producer { List addresses = ClientUtils.parseAndValidateAddresses(config.getList(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG)); this.metadata.update(Cluster.bootstrap(addresses), time.milliseconds()); - NetworkClient client = new NetworkClient(new Selector(this.metrics, time, "producer", metricTags, securityConfig), + NetworkClient client = new NetworkClient(new Selector(this.metrics, time, "producer", metricTags, config.values()), this.metadata, clientId, config.getInt(ProducerConfig.MAX_IN_FLIGHT_REQUESTS_PER_CONNECTION), @@ -288,7 +285,7 @@ public class KafkaProducer implements Producer { } /** - * Asynchronously send a record to a topic. Equivalent to send(record, null). + * Asynchronously send a record to a topic. Equivalent to send(record, null). * See {@link #send(ProducerRecord, Callback)} for details. */ @Override @@ -308,11 +305,11 @@ public class KafkaProducer implements Producer { *

  * Since the send call is asynchronous it returns a {@link java.util.concurrent.Future Future} for the
  * {@link RecordMetadata} that will be assigned to this record. Invoking {@link java.util.concurrent.Future#get()
- * get()} on this future will block until the associated request completes and then return the metadata for the record 
+ * get()} on this future will block until the associated request completes and then return the metadata for the record
  * or throw any exception that occurred while sending the record.
  *

  * If you want to simulate a simple blocking call you can call the get() method immediately:
- * 
+ *
  *

          * {@code
          * byte[] key = "key".getBytes();
    @@ -323,7 +320,7 @@ public class KafkaProducer implements Producer {
          * 

  * Fully non-blocking usage can make use of the {@link Callback} parameter to provide a callback that
  * will be invoked when the request is complete.
- * 
+ *
  *

          * {@code
          * ProducerRecord record = new ProducerRecord("the-topic", key, value);
    @@ -337,10 +334,10 @@ public class KafkaProducer implements Producer {
          *               });
          * }
          * 
- * 
+ *
  * Callbacks for records being sent to the same partition are guaranteed to execute in order. That is, in the
  * following example callback1 is guaranteed to execute before callback2:
- * 
+ *
  *
          * {@code
          * producer.send(new ProducerRecord(topic, partition, key1, value1), callback1);
    @@ -352,15 +349,15 @@ public class KafkaProducer implements Producer {
          * they will delay the sending of messages from other threads. If you want to execute blocking or computationally
          * expensive callbacks it is recommended to use your own {@link java.util.concurrent.Executor} in the callback body
          * to parallelize processing.
    -     * 
    +     *
          * @param record The record to send
          * @param callback A user-supplied callback to execute when the record has been acknowledged by the server (null
          *        indicates no callback)
    -     *        
    +     *
          * @throws InterruptException If the thread is interrupted while blocked
          * @throws SerializationException If the key or value are not valid objects given the configured serializers
          * @throws BufferExhaustedException If block.on.buffer.full=false and the buffer is full.
    -     * 
    +     *
          */
         @Override
         public Future send(ProducerRecord record, Callback callback) {
    @@ -455,12 +452,12 @@ public class KafkaProducer implements Producer {
                                                   ProducerConfig.BUFFER_MEMORY_CONFIG +
                                                   " configuration.");
         }
    -    
    +
         /**
    -     * Invoking this method makes all buffered records immediately available to send (even if linger.ms is 
    +     * Invoking this method makes all buffered records immediately available to send (even if linger.ms is
          * greater than 0) and blocks on the completion of the requests associated with these records. The post-condition
    -     * of flush() is that any previously sent record will have completed (e.g. Future.isDone() == true). 
    -     * A request is considered completed when it is successfully acknowledged 
    +     * of flush() is that any previously sent record will have completed (e.g. Future.isDone() == true).
    +     * A request is considered completed when it is successfully acknowledged
          * according to the acks configuration you have specified or else it results in an error.
          * 

  * Other threads can continue sending records while one thread is blocked waiting for a flush call to complete,
@@ -478,10 +475,10 @@ public class KafkaProducer implements Producer {
  * consumer.commit();
  * }
  *

    - * + * * Note that the above example may drop records if the produce request fails. If we want to ensure that this does not occur * we need to set retries=<large_number> in our config. - * + * * @throws InterruptException If the thread is interrupted while blocked */ @Override diff --git a/clients/src/main/java/org/apache/kafka/clients/producer/ProducerConfig.java b/clients/src/main/java/org/apache/kafka/clients/producer/ProducerConfig.java index 83506e7..69563ee 100644 --- a/clients/src/main/java/org/apache/kafka/clients/producer/ProducerConfig.java +++ b/clients/src/main/java/org/apache/kafka/clients/producer/ProducerConfig.java @@ -3,9 +3,9 @@ * file distributed with this work for additional information regarding copyright ownership. The ASF licenses this file * to You under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the * License. You may obtain a copy of the License at - * + * * http://www.apache.org/licenses/LICENSE-2.0 - * + * * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the * specific language governing permissions and limitations under the License. @@ -26,6 +26,7 @@ import org.apache.kafka.common.config.ConfigDef; import org.apache.kafka.common.config.ConfigDef.Importance; import org.apache.kafka.common.config.ConfigDef.Type; import org.apache.kafka.common.serialization.Serializer; +import org.apache.kafka.common.protocol.SecurityProtocol; /** * Configuration for the Kafka Producer. Documentation for these configurations can be found in the metadata.max.age.ms */ public static final String METADATA_MAX_AGE_CONFIG = CommonClientConfigs.METADATA_MAX_AGE_CONFIG; private static final String METADATA_MAX_AGE_DOC = CommonClientConfigs.METADATA_MAX_AGE_DOC; - + /** batch.size */ public static final String BATCH_SIZE_CONFIG = "batch.size"; private static final String BATCH_SIZE_DOC = "The producer will attempt to batch records together into fewer requests whenever multiple records are being sent" + " to the same partition. This helps performance on both the client and the server. 
This configuration controls the " @@ -169,9 +170,6 @@ public class ProducerConfig extends AbstractConfig { public static final String VALUE_SERIALIZER_CLASS_CONFIG = "value.serializer"; private static final String VALUE_SERIALIZER_CLASS_DOC = "Serializer class for value that implements the Serializer interface."; - /** security.config.file */ - public static final String SECURITY_CONFIG_FILE_CONFIG = CommonClientConfigs.SECURITY_CONFIG_FILE_CONFIG; - private static final String SECURITY_CONFIG_FILE_DOC = CommonClientConfigs.SECURITY_CONFIG_FILE_DOC; static { @@ -223,7 +221,20 @@ public class ProducerConfig extends AbstractConfig { MAX_IN_FLIGHT_REQUESTS_PER_CONNECTION_DOC) .define(KEY_SERIALIZER_CLASS_CONFIG, Type.CLASS, Importance.HIGH, KEY_SERIALIZER_CLASS_DOC) .define(VALUE_SERIALIZER_CLASS_CONFIG, Type.CLASS, Importance.HIGH, VALUE_SERIALIZER_CLASS_DOC) - .define(SECURITY_CONFIG_FILE_CONFIG, Type.STRING, "", Importance.MEDIUM, SECURITY_CONFIG_FILE_DOC); + .define(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG, Type.STRING, SecurityProtocol.PLAINTEXT.toString(), Importance.MEDIUM, CommonClientConfigs.SECURITY_PROTOCOL_DOC) + .define(CommonClientConfigs.SSL_PROTOCOL_CONFIG, Type.STRING, "TLS", Importance.MEDIUM, CommonClientConfigs.SSL_PROTOCOL_DOC) + .define(CommonClientConfigs.SSL_CIPHER_SUITES_CONFIG, Type.LIST, "", Importance.LOW, CommonClientConfigs.SSL_CIPHER_SUITES_DOC) + .define(CommonClientConfigs.SSL_ENABLED_PROTOCOLS_CONFIG, Type.LIST, "TLSv1.2, TLSv1.1, TLSv1", Importance.MEDIUM, CommonClientConfigs.SSL_ENABLED_PROTOCOLS_DOC) + .define(CommonClientConfigs.SSL_KEYSTORE_TYPE_CONFIG, Type.STRING, "JKS", Importance.MEDIUM, CommonClientConfigs.SSL_KEYSTORE_TYPE_DOC) + .define(CommonClientConfigs.SSL_KEYSTORE_LOCATION_CONFIG, Type.STRING, Importance.HIGH, CommonClientConfigs.SSL_KEYSTORE_LOCATION_DOC, false) + .define(CommonClientConfigs.SSL_KEYSTORE_PASSWORD_CONFIG, Type.STRING, Importance.HIGH, CommonClientConfigs.SSL_KEYSTORE_PASSWORD_DOC, false) + .define(CommonClientConfigs.SSL_KEY_PASSWORD_CONFIG, Type.STRING, Importance.HIGH, CommonClientConfigs.SSL_KEY_PASSWORD_DOC, false) + .define(CommonClientConfigs.SSL_TRUSTSTORE_TYPE_CONFIG, Type.STRING, "JKS", Importance.MEDIUM, CommonClientConfigs.SSL_TRUSTSTORE_TYPE_DOC) + .define(CommonClientConfigs.SSL_TRUSTSTORE_LOCATION_CONFIG, Type.STRING, Importance.HIGH, CommonClientConfigs.SSL_TRUSTSTORE_LOCATION_DOC, false) + .define(CommonClientConfigs.SSL_TRUSTSTORE_PASSWORD_CONFIG, Type.STRING, Importance.HIGH, CommonClientConfigs.SSL_TRUSTSTORE_PASSWORD_DOC, false) + .define(CommonClientConfigs.SSL_KEYMANAGER_ALGORITHM_CONFIG, Type.STRING, "SunX509", Importance.LOW, CommonClientConfigs.SSL_KEYMANAGER_ALGORITHM_DOC) + .define(CommonClientConfigs.SSL_TRUSTMANAGER_ALGORITHM_CONFIG, Type.STRING, "SunX509", Importance.LOW, CommonClientConfigs.SSL_TRUSTMANAGER_ALGORITHM_DOC) + .define(CommonClientConfigs.SSL_CLIENT_REQUIRE_CERT_CONFIG, Type.BOOLEAN, false, Importance.MEDIUM, CommonClientConfigs.SSL_CLIENT_REQUIRE_CERT_DOC); } public static Map addSerializerToConfig(Map configs, diff --git a/clients/src/main/java/org/apache/kafka/common/config/AbstractConfig.java b/clients/src/main/java/org/apache/kafka/common/config/AbstractConfig.java index c4fa058..a8f15bc 100644 --- a/clients/src/main/java/org/apache/kafka/common/config/AbstractConfig.java +++ b/clients/src/main/java/org/apache/kafka/common/config/AbstractConfig.java @@ -3,9 +3,9 @@ * file distributed with this work for additional information regarding copyright ownership. 
The ASF licenses this file * to You under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the * License. You may obtain a copy of the License at - * + * * http://www.apache.org/licenses/LICENSE-2.0 - * + * * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the * specific language governing permissions and limitations under the License. @@ -98,6 +98,12 @@ public class AbstractConfig { return copy; } + public Map values() { + Map copy = new HashMap(); + copy.putAll(values); + return values; + } + private void logAll() { StringBuilder b = new StringBuilder(); b.append(getClass().getSimpleName()); @@ -124,7 +130,7 @@ public class AbstractConfig { /** * Get a configured instance of the give class specified by the given configuration key. If the object implements * Configurable configure it using the configuration. - * + * * @param key The configuration key for the class * @param t The interface the class should implement * @return A configured instance of the class diff --git a/clients/src/main/java/org/apache/kafka/common/config/SecurityConfig.java b/clients/src/main/java/org/apache/kafka/common/config/SecurityConfig.java deleted file mode 100644 index 7954a7e..0000000 --- a/clients/src/main/java/org/apache/kafka/common/config/SecurityConfig.java +++ /dev/null @@ -1,113 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE - * file distributed with this work for additional information regarding copyright ownership. The ASF licenses this file - * to You under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the - * License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on - * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the - * specific language governing permissions and limitations under the License. - */ - -package org.apache.kafka.common.config; - - -import java.util.Map; - -import org.apache.kafka.common.config.ConfigDef.Importance; -import org.apache.kafka.common.config.ConfigDef.Type; -import org.apache.kafka.common.protocol.SecurityProtocol; - - -/** - * Security Related config for clients and server. - */ - -public class SecurityConfig extends AbstractConfig { - /* - * NOTE: DO NOT CHANGE EITHER CONFIG STRINGS OR THEIR JAVA VARIABLE NAMES AS THESE ARE PART OF THE PUBLIC API AND - * CHANGE WILL BREAK USER CODE. - */ - - private static final ConfigDef CONFIG; - - public static final String SECURITY_PROTOCOL_CONFIG = "security.protocol"; - public static final String SECURITY_PROTOCOL_DOC = "Protocol used to communicate with brokers. Currently only PLAINTEXT and SSL are supported."; - - public static final String SSL_PROTOCOL_CONFIG = "ssl.protocol"; - public static final String SSL_PROTOCOL_DOC = "The TLS protocol used for broker connections if security protocol is SSL. " - + "Any version of TLS is accepted by default."; - - public static final String SSL_CIPHER_SUITES_CONFIG = "ssl.cipher.suites"; - public static final String SSL_CIPHER_SUITES_DOC = "The list of cipher suites enabled for SSL connections. 
" - + "Default value is the list of cipher suites enabled for the Java Virtual Machine."; - - public static final String SSL_ENABLED_PROTOCOLS_CONFIG = "ssl.enabled.protocols"; - public static final String SSL_ENABLED_PROTOCOLS_DOC = "The list of protocols enabled for SSL connections. " - + "Default value is the list of protocols enabled for the Java Virtual Machine."; - - - public static final String SSL_KEYSTORE_TYPE_CONFIG = "ssl.keystore.type"; - public static final String SSL_KEYSTORE_TYPE_DOC = "The file format of the key store file. " - + "Default value is the default key store format of the Java Virtual Machine."; - - public static final String SSL_KEYSTORE_LOCATION_CONFIG = "ssl.keystore.location"; - public static final String SSL_KEYSTORE_LOCATION_DOC = "The location of the key store file. " - + "This is optional for Client and can be used for two-way authentication for client."; - - public static final String SSL_KEYSTORE_PASSWORD_CONFIG = "ssl.keystore.password"; - public static final String SSL_KEYSTORE_PASSWORD_DOC = "The store password for the key store file. "; - - - public static final String SSL_KEY_PASSWORD_CONFIG = "ssl.key.password"; - public static final String SSL_KEY_PASSWORD_DOC = "The password of the private key in the key store file. " - + "This is optional for client."; - - public static final String SSL_TRUSTSTORE_TYPE_CONFIG = "ssl.truststore.type"; - public static final String SSL_TRUSTSTORE_TYPE_DOC = "The file format of the trust store file. " - + "Default value is JKS."; - - public static final String SSL_TRUSTSTORE_LOCATION_CONFIG = "ssl.truststore.location"; - public static final String SSL_TRUSTSTORE_LOCATION_DOC = "The location of the trust store file. "; - - public static final String SSL_TRUSTSTORE_PASSWORD_CONFIG = "ssl.truststore.password"; - public static final String SSL_TRUSTSTORE_PASSWORD_DOC = "The password for the trust store file. "; - - public static final String SSL_CLIENT_REQUIRE_CERT_CONFIG = "ssl.client.require.cert"; - public static final String SSL_CLIENT_REQUIRE_CERT_DOC = "This is to enforce two-way authentication between client and server." - + "Default value is false. If set to true client need to prover Keystrore releated config"; - - public static final String SSL_KEYMANAGER_ALGORITHM_CONFIG = "ssl.keymanager.algorithm"; - public static final String SSL_KEYMANAGER_ALGORITHM_DOC = "The algorithm used by key manager factory for SSL connections. " - + "Default value is the key manager factory algorithm configured for the Java Virtual Machine."; - - public static final String SSL_TRUSTMANAGER_ALGORITHM_CONFIG = "ssl.trustmanager.algorithm"; - public static final String SSL_TRUSTMANAGER_ALGORITHM_DOC = "The algorithm used by trust manager factory for SSL connections. 
" - + "Default value is the trust manager factory algorithm configured for the Java Virtual Machine."; - - - static { - CONFIG = new ConfigDef().define(SECURITY_PROTOCOL_CONFIG, Type.STRING, SecurityProtocol.PLAINTEXT.toString(), Importance.MEDIUM, SECURITY_PROTOCOL_DOC) - .define(SSL_PROTOCOL_CONFIG, Type.STRING, "TLS", Importance.MEDIUM, SSL_PROTOCOL_DOC) - .define(SSL_CIPHER_SUITES_CONFIG, Type.LIST, Importance.MEDIUM, SSL_CIPHER_SUITES_DOC, false) - .define(SSL_ENABLED_PROTOCOLS_CONFIG, Type.LIST, "TLSv1.2, TLSv1.1, TLSv1", Importance.MEDIUM, SSL_ENABLED_PROTOCOLS_DOC) - .define(SSL_KEYSTORE_TYPE_CONFIG, Type.STRING, Importance.MEDIUM, SSL_KEYSTORE_TYPE_DOC, false) - .define(SSL_KEYSTORE_LOCATION_CONFIG, Type.STRING, Importance.MEDIUM, SSL_KEYSTORE_LOCATION_DOC, false) - .define(SSL_KEYSTORE_PASSWORD_CONFIG, Type.STRING, Importance.MEDIUM, SSL_KEYSTORE_PASSWORD_DOC, false) - .define(SSL_KEY_PASSWORD_CONFIG, Type.STRING, Importance.MEDIUM, SSL_KEY_PASSWORD_DOC, false) - .define(SSL_TRUSTSTORE_TYPE_CONFIG, Type.STRING, Importance.MEDIUM, SSL_TRUSTSTORE_TYPE_DOC, false) - .define(SSL_TRUSTSTORE_LOCATION_CONFIG, Type.STRING, Importance.MEDIUM, SSL_TRUSTSTORE_LOCATION_DOC, false) - .define(SSL_TRUSTSTORE_PASSWORD_CONFIG, Type.STRING, Importance.MEDIUM, SSL_TRUSTSTORE_PASSWORD_DOC, false) - .define(SSL_KEYMANAGER_ALGORITHM_CONFIG, Type.STRING, Importance.MEDIUM, SSL_KEYMANAGER_ALGORITHM_DOC, false) - .define(SSL_TRUSTMANAGER_ALGORITHM_CONFIG, Type.STRING, Importance.MEDIUM, SSL_TRUSTMANAGER_ALGORITHM_DOC, false) - .define(SSL_CLIENT_REQUIRE_CERT_CONFIG, Type.BOOLEAN, false, Importance.MEDIUM, SSL_CLIENT_REQUIRE_CERT_DOC); - } - - public SecurityConfig(Map props) { - super(CONFIG, props); - } - - -} diff --git a/clients/src/main/java/org/apache/kafka/common/network/Authenticator.java b/clients/src/main/java/org/apache/kafka/common/network/Authenticator.java index ee8516f..1b6b32a 100644 --- a/clients/src/main/java/org/apache/kafka/common/network/Authenticator.java +++ b/clients/src/main/java/org/apache/kafka/common/network/Authenticator.java @@ -43,7 +43,7 @@ public interface Authenticator { /** * Returns UserPrincipal after authentication is established */ - UserPrincipal userPrincipal(); + UserPrincipal userPrincipal() throws IOException; /** diff --git a/clients/src/main/java/org/apache/kafka/common/network/Channel.java b/clients/src/main/java/org/apache/kafka/common/network/Channel.java index 3526ba3..d9d1192 100644 --- a/clients/src/main/java/org/apache/kafka/common/network/Channel.java +++ b/clients/src/main/java/org/apache/kafka/common/network/Channel.java @@ -58,7 +58,7 @@ public class Channel implements ScatteringByteChannel, GatheringByteChannel { * Incase of PLAINTEXT and No Authentication returns ANONYMOUS as the userPrincipal * If SSL used without any SASL Authentication returns SSLSession.peerPrincipal */ - public UserPrincipal userPrincipal() { + public UserPrincipal userPrincipal() throws IOException { return authenticator.userPrincipal(); } diff --git a/clients/src/main/java/org/apache/kafka/common/network/DefaultAuthenticator.java b/clients/src/main/java/org/apache/kafka/common/network/DefaultAuthenticator.java index c1ec794..97b1135 100644 --- a/clients/src/main/java/org/apache/kafka/common/network/DefaultAuthenticator.java +++ b/clients/src/main/java/org/apache/kafka/common/network/DefaultAuthenticator.java @@ -34,7 +34,7 @@ public class DefaultAuthenticator implements Authenticator { return 0; } - public UserPrincipal userPrincipal() { + public UserPrincipal 
userPrincipal() throws IOException { return new UserPrincipal(transportLayer.getPeerPrincipal().toString()); } diff --git a/clients/src/main/java/org/apache/kafka/common/network/PlainTextTransportLayer.java b/clients/src/main/java/org/apache/kafka/common/network/PlainTextTransportLayer.java index 11cd80c..dbf0a30 100644 --- a/clients/src/main/java/org/apache/kafka/common/network/PlainTextTransportLayer.java +++ b/clients/src/main/java/org/apache/kafka/common/network/PlainTextTransportLayer.java @@ -42,7 +42,6 @@ public class PlainTextTransportLayer implements TransportLayer { public PlainTextTransportLayer(SocketChannel socketChannel) throws IOException { this.socketChannel = socketChannel; - } @@ -141,7 +140,7 @@ public class PlainTextTransportLayer implements TransportLayer { return outStream; } - public Principal getPeerPrincipal() { + public Principal getPeerPrincipal() throws IOException { return new UserPrincipal("ANONYMOUS"); } diff --git a/clients/src/main/java/org/apache/kafka/common/network/SSLFactory.java b/clients/src/main/java/org/apache/kafka/common/network/SSLFactory.java index 9cf9051..73f976b 100644 --- a/clients/src/main/java/org/apache/kafka/common/network/SSLFactory.java +++ b/clients/src/main/java/org/apache/kafka/common/network/SSLFactory.java @@ -16,6 +16,8 @@ */ package org.apache.kafka.common.network; +import java.util.Map; +import java.util.List; import java.io.FileInputStream; import java.io.IOException; import java.security.GeneralSecurityException; @@ -24,10 +26,11 @@ import java.security.KeyStore; import javax.net.ssl.*; import org.apache.kafka.common.KafkaException; -import org.apache.kafka.common.config.SecurityConfig; +import org.apache.kafka.common.Configurable; +import org.apache.kafka.clients.CommonClientConfigs; -public class SSLFactory { +public class SSLFactory implements Configurable { public enum Mode { CLIENT, SERVER }; private String protocol; @@ -48,27 +51,31 @@ public class SSLFactory { this.mode = mode; } - - public void init(SecurityConfig securityConfig) throws IOException, GeneralSecurityException { - this.protocol = securityConfig.getString(SecurityConfig.SSL_PROTOCOL_CONFIG); - if (securityConfig.getList(SecurityConfig.SSL_CIPHER_SUITES_CONFIG) != null) - this.cipherSuites = (String[]) securityConfig.getList(SecurityConfig.SSL_CIPHER_SUITES_CONFIG).toArray(); - if (securityConfig.getList(SecurityConfig.SSL_ENABLED_PROTOCOLS_CONFIG) != null) - this.enabledProtocols = (String[]) securityConfig.getList(SecurityConfig.SSL_ENABLED_PROTOCOLS_CONFIG).toArray(); - this.requireClientCert = securityConfig.getBoolean(SecurityConfig.SSL_CLIENT_REQUIRE_CERT_CONFIG); - this.kmfAlgorithm = securityConfig.getString(SecurityConfig.SSL_KEYMANAGER_ALGORITHM_CONFIG); - this.tmfAlgorithm = securityConfig.getString(SecurityConfig.SSL_TRUSTMANAGER_ALGORITHM_CONFIG); - if ((mode == Mode.CLIENT && requireClientCert) || (mode == Mode.SERVER)) - createKeystore(securityConfig.getString(SecurityConfig.SSL_KEYSTORE_TYPE_CONFIG), - securityConfig.getString(SecurityConfig.SSL_KEYSTORE_LOCATION_CONFIG), - securityConfig.getString(SecurityConfig.SSL_KEYSTORE_PASSWORD_CONFIG), - securityConfig.getString(SecurityConfig.SSL_KEY_PASSWORD_CONFIG)); - createTruststore(securityConfig.getString(SecurityConfig.SSL_TRUSTSTORE_TYPE_CONFIG), - securityConfig.getString(SecurityConfig.SSL_TRUSTSTORE_LOCATION_CONFIG), - securityConfig.getString(SecurityConfig.SSL_TRUSTSTORE_PASSWORD_CONFIG)); - - this.sslContext = createSSLContext(); - + @Override + public void configure(Map configs) 
throws KafkaException { + this.protocol = (String) configs.get(CommonClientConfigs.SSL_PROTOCOL_CONFIG); + if (configs.get(CommonClientConfigs.SSL_CIPHER_SUITES_CONFIG) != null) + this.cipherSuites = (String[]) ((List) configs.get(CommonClientConfigs.SSL_CIPHER_SUITES_CONFIG)).toArray(); + if (configs.get(CommonClientConfigs.SSL_ENABLED_PROTOCOLS_CONFIG) != null) + this.enabledProtocols = (String[]) ((List) configs.get(CommonClientConfigs.SSL_ENABLED_PROTOCOLS_CONFIG)).toArray(); + this.requireClientCert = (Boolean) configs.get(CommonClientConfigs.SSL_CLIENT_REQUIRE_CERT_CONFIG); + this.kmfAlgorithm = (String) configs.get(CommonClientConfigs.SSL_KEYMANAGER_ALGORITHM_CONFIG); + this.tmfAlgorithm = (String) configs.get(CommonClientConfigs.SSL_TRUSTMANAGER_ALGORITHM_CONFIG); + if ((mode == Mode.CLIENT && requireClientCert) || (mode == Mode.SERVER)) { + createKeystore((String) configs.get(CommonClientConfigs.SSL_KEYSTORE_TYPE_CONFIG), + (String) configs.get(CommonClientConfigs.SSL_KEYSTORE_LOCATION_CONFIG), + (String) configs.get(CommonClientConfigs.SSL_KEYSTORE_PASSWORD_CONFIG), + (String) configs.get(CommonClientConfigs.SSL_KEY_PASSWORD_CONFIG)); + } + createTruststore((String) configs.get(CommonClientConfigs.SSL_TRUSTSTORE_TYPE_CONFIG), + (String) configs.get(CommonClientConfigs.SSL_TRUSTSTORE_LOCATION_CONFIG), + (String) configs.get(CommonClientConfigs.SSL_TRUSTSTORE_PASSWORD_CONFIG)); + try { + this.sslContext = createSSLContext(); + } catch (Exception e) { + e.printStackTrace(); + throw new KafkaException(e); + } } @@ -183,6 +190,4 @@ public class SSLFactory { } } - public void close() { - } } diff --git a/clients/src/main/java/org/apache/kafka/common/network/SSLTransportLayer.java b/clients/src/main/java/org/apache/kafka/common/network/SSLTransportLayer.java index dc84975..2d6a519 100644 --- a/clients/src/main/java/org/apache/kafka/common/network/SSLTransportLayer.java +++ b/clients/src/main/java/org/apache/kafka/common/network/SSLTransportLayer.java @@ -27,9 +27,12 @@ import javax.net.ssl.SSLEngine; import javax.net.ssl.SSLEngineResult; import javax.net.ssl.SSLEngineResult.HandshakeStatus; import javax.net.ssl.SSLEngineResult.Status; +import javax.net.ssl.SSLException; +import javax.net.ssl.SSLPeerUnverifiedException; import java.io.DataInputStream; import java.io.DataOutputStream; +import java.util.concurrent.ExecutorService; import org.apache.kafka.common.utils.Utils; import org.slf4j.Logger; @@ -41,25 +44,27 @@ import org.slf4j.LoggerFactory; public class SSLTransportLayer implements TransportLayer { private static final Logger log = LoggerFactory.getLogger(SSLTransportLayer.class); - SocketChannel socketChannel; - SSLEngine sslEngine; - HandshakeStatus handshakeStatus = null; - SSLEngineResult handshakeResult = null; - boolean handshakeComplete = false; - boolean closed = false; - boolean closing = false; - ByteBuffer netInBuffer = null; - ByteBuffer netOutBuffer = null; - ByteBuffer appReadBuffer = null; - ByteBuffer appWriteBuffer = null; - ByteBuffer emptyBuf = ByteBuffer.allocate(0); - DataInputStream inStream = null; - DataOutputStream outStream = null; - - - public SSLTransportLayer(SocketChannel socketChannel, SSLEngine sslEngine) throws IOException { + protected SSLEngine sslEngine; + + private SocketChannel socketChannel; + private HandshakeStatus handshakeStatus; + private SSLEngineResult handshakeResult; + private boolean handshakeComplete = false; + private boolean closed = false; + private boolean closing = false; + private ByteBuffer netInBuffer; + private ByteBuffer 
netOutBuffer; + private ByteBuffer appReadBuffer; + private ByteBuffer appWriteBuffer; + private ByteBuffer emptyBuf = ByteBuffer.allocate(0); + private DataInputStream inStream; + private DataOutputStream outStream; + private ExecutorService executorService; + + public SSLTransportLayer(SocketChannel socketChannel, SSLEngine sslEngine, ExecutorService executorService) throws IOException { this.socketChannel = socketChannel; this.sslEngine = sslEngine; + this.executorService = executorService; this.netInBuffer = ByteBuffer.allocateDirect(sslEngine.getSession().getPacketBufferSize()); this.netOutBuffer = ByteBuffer.allocateDirect(sslEngine.getSession().getPacketBufferSize()); this.appWriteBuffer = ByteBuffer.allocateDirect(sslEngine.getSession().getApplicationBufferSize()); @@ -116,56 +121,60 @@ public class SSLTransportLayer implements TransportLayer { if (handshakeComplete) return 0; //we have done our initial handshake if (!flush(netOutBuffer)) return SelectionKey.OP_WRITE; - - switch(handshakeStatus) { - case NOT_HANDSHAKING: - // SSLEnginge.getHandshakeStatus is transient and it doesn't record FINISHED status properly - if (handshakeResult.getHandshakeStatus() == HandshakeStatus.FINISHED) { + try { + switch(handshakeStatus) { + case NOT_HANDSHAKING: + // SSLEnginge.getHandshakeStatus is transient and it doesn't record FINISHED status properly + if (handshakeResult.getHandshakeStatus() == HandshakeStatus.FINISHED) { + handshakeComplete = !netOutBuffer.hasRemaining(); + if (handshakeComplete) + return 0; + else + return SelectionKey.OP_WRITE; + } else { + throw new IOException("NOT_HANDSHAKING during handshake"); + } + case FINISHED: + //we are complete if we have delivered the last package handshakeComplete = !netOutBuffer.hasRemaining(); - if (handshakeComplete) - return 0; - else + //return 0 if we are complete, otherwise we still have data to write + if (handshakeComplete) return 0; + else return SelectionKey.OP_WRITE; + case NEED_WRAP: + handshakeResult = handshakeWrap(write); + if (handshakeResult.getStatus() == Status.OK) { + if (handshakeStatus == HandshakeStatus.NEED_TASK) + handshakeStatus = tasks(); + } else { + //wrap should always work with our buffers + throw new IOException("Unexpected status [" + handshakeResult.getStatus() + "] during handshake WRAP."); + } + if (handshakeStatus != HandshakeStatus.NEED_UNWRAP || (!flush(netOutBuffer))) return SelectionKey.OP_WRITE; - } else { - //should never happen - throw new IOException("NOT_HANDSHAKING during handshake"); - } - case FINISHED: - //we are complete if we have delivered the last package - handshakeComplete = !netOutBuffer.hasRemaining(); - //return 0 if we are complete, otherwise we still have data to write - if (handshakeComplete) return 0; - else return SelectionKey.OP_WRITE; - case NEED_WRAP: - handshakeResult = handshakeWrap(write); - if (handshakeResult.getStatus() == Status.OK) { - if (handshakeStatus == HandshakeStatus.NEED_TASK) - handshakeStatus = tasks(); - } else { - //wrap should always work with our buffers - throw new IOException("Unexpected status [" + handshakeResult.getStatus() + "] during handshake WRAP."); - } - if (handshakeStatus != HandshakeStatus.NEED_UNWRAP || (!flush(netOutBuffer))) - return SelectionKey.OP_WRITE; - //fall down to NEED_UNWRAP on the same call, will result in a - //BUFFER_UNDERFLOW if it needs data - case NEED_UNWRAP: - handshakeResult = handshakeUnwrap(read); - if (handshakeResult.getStatus() == Status.OK) { - if (handshakeStatus == HandshakeStatus.NEED_TASK) - handshakeStatus = 
tasks(); - } else if (handshakeResult.getStatus() == Status.BUFFER_UNDERFLOW) { - return SelectionKey.OP_READ; - } else { - throw new IOException(String.format("Unexpected status [%s] during handshake UNWRAP", handshakeStatus)); - } - break; - case NEED_TASK: - handshakeStatus = tasks(); - break; - default: - throw new IllegalStateException(String.format("Unexpected status [%s]", handshakeStatus)); + //fall down to NEED_UNWRAP on the same call, will result in a + //BUFFER_UNDERFLOW if it needs data + case NEED_UNWRAP: + handshakeResult = handshakeUnwrap(read); + if (handshakeResult.getStatus() == Status.OK) { + if (handshakeStatus == HandshakeStatus.NEED_TASK) + handshakeStatus = tasks(); + } else if (handshakeResult.getStatus() == Status.BUFFER_UNDERFLOW) { + return SelectionKey.OP_READ; + } else { + throw new IOException(String.format("Unexpected status [%s] during handshake UNWRAP", handshakeStatus)); + } + break; + case NEED_TASK: + handshakeStatus = tasks(); + break; + default: + throw new IllegalStateException(String.format("Unexpected status [%s]", handshakeStatus)); + } + } catch (SSLException e) { + handshakeFailure(); + throw e; } + //return 0 if we are complete, otherwise re-register for any activity that //would cause this method to be called again. if (handshakeComplete) return 0; @@ -173,12 +182,22 @@ public class SSLTransportLayer implements TransportLayer { } /** - * Executes all the tasks needed on the same thread. + * Executes all the tasks needed on the executorservice thread. * @return HandshakeStatus */ private HandshakeStatus tasks() { - Runnable r = null; - while ((r = sslEngine.getDelegatedTask()) != null) r.run(); + for (;;) { + final Runnable task = sslEngine.getDelegatedTask(); + if (task == null) + break; + + executorService.submit(new Runnable() { + @Override + public void run() { + task.run(); + } + }); + } return sslEngine.getHandshakeStatus(); } @@ -237,10 +256,6 @@ public class SSLTransportLayer implements TransportLayer { } - public int getOutboundRemaining() { - return netOutBuffer.remaining(); - } - /** * Sends a SSL close message, will not physically close the connection here.
    * @throws IOException if an I/O error occurs @@ -283,7 +298,7 @@ public class SSLTransportLayer implements TransportLayer { * @param dst The buffer into which bytes are to be transferred * @return The number of bytes read, possible zero or -1 if the channel has reached end-of-stream * @throws IOException if some other I/O error occurs - * @throws IllegalStateException if the destination buffer is different than appBufHandler.getReadBuffer() + * @throws IllegalStateException if handshake is not complete. */ public int read(ByteBuffer dst) throws IOException { if (closing || closed) return -1; @@ -305,16 +320,10 @@ public class SSLTransportLayer implements TransportLayer { if (unwrap.getHandshakeStatus() == HandshakeStatus.NEED_TASK) tasks(); //if we need more network data, than return for now. if (unwrap.getStatus() == Status.BUFFER_UNDERFLOW) return readFromAppBuffer(dst); - } else if (unwrap.getStatus() == Status.BUFFER_OVERFLOW && read > 0) { + } else if (unwrap.getStatus() == Status.BUFFER_OVERFLOW) { appReadBuffer = Utils.ensureCapacity(appReadBuffer, applicationBufferSize()); - //buffer overflow can happen, if we have read data, then //empty out the dst buffer before we do another read return readFromAppBuffer(dst); - } else { - //here we should trap BUFFER_OVERFLOW and call expand on the buffer - // for now, throw an exception, as we initialized the buffers - // in constructor - throw new IOException(String.format("Unable to unwrap data, invalid status [%s]", unwrap.getStatus())); } } while(netInBuffer.position() != 0); return readFromAppBuffer(dst); @@ -335,7 +344,6 @@ public class SSLTransportLayer implements TransportLayer { return totalRead; } - /** * Writes a sequence of bytes to this channel from the given buffer. * @@ -346,24 +354,20 @@ public class SSLTransportLayer implements TransportLayer { public int write(ByteBuffer src) throws IOException { int written = 0; - if (src == this.netOutBuffer) - written = socketChannel.write(src); - else { - if (closing || closed) throw new IOException("Channel is in closing state"); - if (!flush(netOutBuffer)) - return written; - netOutBuffer.clear(); - SSLEngineResult result = sslEngine.wrap(src, netOutBuffer); - written = result.bytesConsumed(); - netOutBuffer.flip(); - if (result.getStatus() == Status.OK) { - if (result.getHandshakeStatus() == HandshakeStatus.NEED_TASK) - tasks(); - } else { - throw new IOException(String.format("Unable to wrap data, invalid status %s", result.getStatus())); - } - flush(netOutBuffer); + if (closing || closed) throw new IOException("Channel is in closing state"); + if (!flush(netOutBuffer)) + return written; + netOutBuffer.clear(); + SSLEngineResult result = sslEngine.wrap(src, netOutBuffer); + written = result.bytesConsumed(); + netOutBuffer.flip(); + if (result.getStatus() == Status.OK) { + if (result.getHandshakeStatus() == HandshakeStatus.NEED_TASK) + tasks(); + } else { + throw new IOException(String.format("Unable to wrap data, invalid status %s", result.getStatus())); } + flush(netOutBuffer); return written; } @@ -396,9 +400,12 @@ public class SSLTransportLayer implements TransportLayer { return outStream; } - public Principal getPeerPrincipal() { - //return sslEngine.getSession().getPeerPrincipal(); - return null; + public Principal getPeerPrincipal() throws IOException { + try { + return sslEngine.getSession().getPeerPrincipal(); + } catch (SSLPeerUnverifiedException se) { + throw new IOException(String.format("Unable to retrieve getPeerPrincipal due to %s", se)); + } } private int 
readFromAppBuffer(ByteBuffer dst) { @@ -427,4 +434,14 @@ public class SSLTransportLayer implements TransportLayer { private int applicationBufferSize() { return sslEngine.getSession().getApplicationBufferSize(); } + + private void handshakeFailure() { + //Release all resources such as internal buffers that SSLEngine is managing + sslEngine.closeOutbound(); + try { + sslEngine.closeInbound(); + } catch (SSLException e) { + log.debug("SSLEngine.closeInBound() raised an exception.", e); + } + } } diff --git a/clients/src/main/java/org/apache/kafka/common/network/Selector.java b/clients/src/main/java/org/apache/kafka/common/network/Selector.java index 0068143..90e2cee 100644 --- a/clients/src/main/java/org/apache/kafka/common/network/Selector.java +++ b/clients/src/main/java/org/apache/kafka/common/network/Selector.java @@ -29,9 +29,10 @@ import java.util.List; import java.util.Map; import java.util.Set; import java.util.concurrent.TimeUnit; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; import org.apache.kafka.common.KafkaException; -import org.apache.kafka.common.config.SecurityConfig; import org.apache.kafka.common.metrics.Measurable; import org.apache.kafka.common.metrics.MetricConfig; import org.apache.kafka.common.MetricName; @@ -43,6 +44,7 @@ import org.apache.kafka.common.metrics.stats.Max; import org.apache.kafka.common.metrics.stats.Rate; import org.apache.kafka.common.protocol.SecurityProtocol; import org.apache.kafka.common.utils.Time; +import org.apache.kafka.clients.CommonClientConfigs; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -90,14 +92,14 @@ public class Selector implements Selectable { private final SelectorMetrics sensors; private final String metricGrpPrefix; private final Map metricTags; - private final SecurityConfig securityConfig; private final SecurityProtocol securityProtocol; private SSLFactory sslFactory = null; + private ExecutorService executorService = null; /** * Create a new selector */ - public Selector(Metrics metrics, Time time, String metricGrpPrefix, Map metricTags, SecurityConfig securityConfig) { + public Selector(Metrics metrics, Time time, String metricGrpPrefix, Map metricTags, Map configs) { try { this.selector = java.nio.channels.Selector.open(); } catch (IOException e) { @@ -114,17 +116,13 @@ public class Selector implements Selectable { this.disconnected = new ArrayList(); this.failedSends = new ArrayList(); this.sensors = new SelectorMetrics(metrics); - this.securityConfig = securityConfig; - this.securityProtocol = SecurityProtocol.valueOf(securityConfig.getString(SecurityConfig.SECURITY_PROTOCOL_CONFIG)); - try { - if (securityProtocol == SecurityProtocol.SSL) { - this.sslFactory = new SSLFactory(SSLFactory.Mode.CLIENT); - this.sslFactory.init(this.securityConfig); - } - } catch (Exception e) { - throw new KafkaException(e); + this.securityProtocol = configs.containsKey(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG) ? 
+ SecurityProtocol.valueOf((String) configs.get(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG)) : SecurityProtocol.PLAINTEXT; + if (securityProtocol == SecurityProtocol.SSL) { + this.executorService = Executors.newScheduledThreadPool(1); + this.sslFactory = new SSLFactory(SSLFactory.Mode.CLIENT); + this.sslFactory.configure(configs); } - } /** @@ -166,7 +164,8 @@ public class Selector implements Selectable { if (securityProtocol == SecurityProtocol.SSL) { transportLayer = new SSLTransportLayer(socketChannel, sslFactory.createSSLEngine(socket.getInetAddress().getHostName(), - socket.getPort())); + socket.getPort()), + executorService); } else { transportLayer = new PlainTextTransportLayer(socketChannel); } @@ -206,8 +205,12 @@ public class Selector implements Selectable { close(key); try { this.selector.close(); + if (this.executorService != null) + this.executorService.shutdown(); } catch (IOException e) { log.error("Exception closing selector:", e); + } catch (SecurityException se) { + log.error("Exception closing selector:", se); } } @@ -463,7 +466,7 @@ public class Selector implements Selectable { } /** - * Get the socket channel associated with this selection key + * Get the Channel associated with this selection key */ private Channel channel(SelectionKey key) { return this.channels.get(key); diff --git a/clients/src/main/java/org/apache/kafka/common/network/TransportLayer.java b/clients/src/main/java/org/apache/kafka/common/network/TransportLayer.java index 6ce013b..ae10f7c 100644 --- a/clients/src/main/java/org/apache/kafka/common/network/TransportLayer.java +++ b/clients/src/main/java/org/apache/kafka/common/network/TransportLayer.java @@ -82,5 +82,5 @@ public interface TransportLayer { boolean flush(ByteBuffer buffer) throws IOException; - Principal getPeerPrincipal(); + Principal getPeerPrincipal() throws IOException; } diff --git a/clients/src/test/java/org/apache/kafka/common/network/EchoServer.java b/clients/src/test/java/org/apache/kafka/common/network/EchoServer.java index 47dda69..7f34738 100644 --- a/clients/src/test/java/org/apache/kafka/common/network/EchoServer.java +++ b/clients/src/test/java/org/apache/kafka/common/network/EchoServer.java @@ -12,7 +12,7 @@ */ package org.apache.kafka.common.network; -import org.apache.kafka.common.config.SecurityConfig; +import org.apache.kafka.clients.CommonClientConfigs; import org.apache.kafka.common.protocol.SecurityProtocol; import java.io.DataInputStream; @@ -22,8 +22,9 @@ import java.net.ServerSocket; import java.net.Socket; import java.util.ArrayList; import java.util.Collections; +import java.util.Map; import java.util.List; -import java.util.concurrent.atomic.AtomicBoolean; + /** @@ -34,17 +35,16 @@ class EchoServer extends Thread { private final ServerSocket serverSocket; private final List threads; private final List sockets; - private SecurityProtocol protocol; + private SecurityProtocol protocol = SecurityProtocol.PLAINTEXT; private SSLFactory sslFactory; - private final AtomicBoolean startHandshake = new AtomicBoolean(); - public EchoServer(SecurityConfig securityConfig) throws Exception { - this.protocol = SecurityProtocol.valueOf(securityConfig.getString(SecurityConfig.SECURITY_PROTOCOL_CONFIG)); + public EchoServer(Map configs) throws Exception { + this.protocol = configs.containsKey(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG) ? 
+ SecurityProtocol.valueOf((String) configs.get(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG)) : SecurityProtocol.PLAINTEXT; if (protocol == SecurityProtocol.SSL) { this.sslFactory = new SSLFactory(SSLFactory.Mode.SERVER); - this.sslFactory.init(securityConfig); + this.sslFactory.configure(configs); this.serverSocket = sslFactory.createSSLServerSocketFactory().createServerSocket(0); - this.startHandshake.set(true); } else { this.serverSocket = new ServerSocket(0); } @@ -105,4 +105,4 @@ class EchoServer extends Thread { t.join(); join(); } -} \ No newline at end of file +} diff --git a/clients/src/test/java/org/apache/kafka/common/network/SSLSelectorTest.java b/clients/src/test/java/org/apache/kafka/common/network/SSLSelectorTest.java index f3c6153..c5f8ecf 100644 --- a/clients/src/test/java/org/apache/kafka/common/network/SSLSelectorTest.java +++ b/clients/src/test/java/org/apache/kafka/common/network/SSLSelectorTest.java @@ -14,12 +14,12 @@ package org.apache.kafka.common.network; import static org.junit.Assert.assertEquals; +import java.util.Map; import java.io.IOException; import java.net.InetSocketAddress; import java.nio.ByteBuffer; import java.util.LinkedHashMap; -import org.apache.kafka.common.config.SecurityConfig; import org.apache.kafka.common.metrics.Metrics; import org.apache.kafka.common.utils.MockTime; import org.apache.kafka.test.TestSSLUtils; @@ -42,12 +42,10 @@ public class SSLSelectorTest { @Before public void setup() throws Exception { - SecurityConfig serverSecurityConfig = TestSSLUtils.createSSLConfigFile(SSLFactory.Mode.SERVER, null); - this.server = new EchoServer(serverSecurityConfig); + Map> sslConfigs = TestSSLUtils.createSSLConfigs(false, true); + this.server = new EchoServer(sslConfigs.get(SSLFactory.Mode.SERVER)); this.server.start(); - String trustStoreServer = serverSecurityConfig.getString(SecurityConfig.SSL_TRUSTSTORE_LOCATION_CONFIG); - SecurityConfig clientSecurityConfig = TestSSLUtils.createSSLConfigFile(SSLFactory.Mode.CLIENT, trustStoreServer); - this.selector = new Selector(new Metrics(), new MockTime(), "MetricGroup", new LinkedHashMap(), clientSecurityConfig); + this.selector = new Selector(new Metrics(), new MockTime(), "MetricGroup", new LinkedHashMap(), sslConfigs.get(SSLFactory.Mode.CLIENT)); } @After @@ -68,6 +66,44 @@ public class SSLSelectorTest { assertEquals(big, blockingRequest(node, big)); } + + /** + * Validate that when the server disconnects, a client send ends up with that node in the disconnected list. 
+ */ + @Test + public void testServerDisconnect() throws Exception { + int node = 0; + // connect and do a simple request + blockingConnect(node); + assertEquals("hello", blockingRequest(node, "hello")); + + // disconnect + this.server.closeConnections(); + while (!selector.disconnected().contains(node)) + selector.poll(1000L); + + // reconnect and do another request + blockingConnect(node); + assertEquals("hello", blockingRequest(node, "hello")); + } + + + /** + * Tests wrap BUFFER_OVERFLOW and unwrap BUFFER_UNDERFLOW + * @throws Exception + */ + @Test + public void testLargeMessageSequence() throws Exception { + int bufferSize = 512 * 1024; + int node = 0; + int reqs = 50; + InetSocketAddress addr = new InetSocketAddress("localhost", server.port); + selector.connect(node, addr, BUFFER_SIZE, BUFFER_SIZE); + String requestPrefix = TestUtils.randomString(bufferSize); + sendAndReceive(node, requestPrefix, 0, reqs); + } + + private String blockingRequest(int node, String s) throws IOException { selector.send(createSend(node, s)); selector.poll(1000L); @@ -94,4 +130,26 @@ public class SSLSelectorTest { selector.poll(10000L); } -} \ No newline at end of file + + private void sendAndReceive(int node, String requestPrefix, int startIndex, int endIndex) throws Exception { + int requests = startIndex; + int responses = startIndex; + selector.send(createSend(node, requestPrefix + "-" + startIndex)); + requests++; + while (responses < endIndex) { + // do the i/o + selector.poll(0L); + assertEquals("No disconnects should have occurred.", 0, selector.disconnected().size()); + + // handle requests and responses of the fast node + for (NetworkReceive receive : selector.completedReceives()) { + assertEquals(requestPrefix + "-" + responses, asString(receive)); + responses++; + } + + for (int i = 0; i < selector.completedSends().size() && requests < endIndex; i++, requests++) { + selector.send(createSend(node, requestPrefix + "-" + requests)); + } + } + } +} diff --git a/clients/src/test/java/org/apache/kafka/common/network/SelectorTest.java b/clients/src/test/java/org/apache/kafka/common/network/SelectorTest.java index e4100d3..3fd8fe2 100644 --- a/clients/src/test/java/org/apache/kafka/common/network/SelectorTest.java +++ b/clients/src/test/java/org/apache/kafka/common/network/SelectorTest.java @@ -3,9 +3,9 @@ * file distributed with this work for additional information regarding copyright ownership. The ASF licenses this file * to You under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the * License. You may obtain a copy of the License at - * + * * http://www.apache.org/licenses/LICENSE-2.0 - * + * * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the * specific language governing permissions and limitations under the License. 
@@ -15,14 +15,14 @@ package org.apache.kafka.common.network; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertTrue; +import java.util.HashMap; +import java.util.Map; import java.io.IOException; import java.net.InetSocketAddress; import java.net.ServerSocket; import java.nio.ByteBuffer; import java.util.LinkedHashMap; -import org.apache.kafka.clients.ClientUtils; -import org.apache.kafka.common.config.SecurityConfig; import org.apache.kafka.common.metrics.Metrics; import org.apache.kafka.common.utils.MockTime; import org.apache.kafka.common.utils.Utils; @@ -43,10 +43,10 @@ public class SelectorTest { @Before public void setup() throws Exception { - SecurityConfig securityConfig = ClientUtils.parseSecurityConfig(""); - this.server = new EchoServer(securityConfig); + Map configs = new HashMap(); + this.server = new EchoServer(configs); this.server.start(); - this.selector = new Selector(new Metrics(), new MockTime() , "MetricGroup", new LinkedHashMap(), securityConfig); + this.selector = new Selector(new Metrics(), new MockTime() , "MetricGroup", new LinkedHashMap(), configs); } @After diff --git a/clients/src/test/java/org/apache/kafka/test/TestSSLUtils.java b/clients/src/test/java/org/apache/kafka/test/TestSSLUtils.java index c811096..bfb52ef 100644 --- a/clients/src/test/java/org/apache/kafka/test/TestSSLUtils.java +++ b/clients/src/test/java/org/apache/kafka/test/TestSSLUtils.java @@ -17,19 +17,8 @@ package org.apache.kafka.test; -import org.apache.kafka.common.config.SecurityConfig; +import org.apache.kafka.clients.CommonClientConfigs; import org.apache.kafka.common.network.SSLFactory; -import sun.security.x509.AlgorithmId; -import sun.security.x509.CertificateAlgorithmId; -import sun.security.x509.CertificateIssuerName; -import sun.security.x509.CertificateSerialNumber; -import sun.security.x509.CertificateSubjectName; -import sun.security.x509.CertificateValidity; -import sun.security.x509.CertificateVersion; -import sun.security.x509.CertificateX509Key; -import sun.security.x509.X500Name; -import sun.security.x509.X509CertImpl; -import sun.security.x509.X509CertInfo; import java.io.File; import java.io.FileOutputStream; @@ -41,14 +30,19 @@ import java.security.KeyPair; import java.security.KeyPairGenerator; import java.security.KeyStore; import java.security.NoSuchAlgorithmException; -import java.security.PrivateKey; import java.security.SecureRandom; import java.security.cert.Certificate; import java.security.cert.X509Certificate; +import java.security.InvalidKeyException; +import java.security.NoSuchProviderException; +import java.security.SignatureException; +import java.security.cert.CertificateEncodingException; +import javax.security.auth.x500.X500Principal; +import org.bouncycastle.x509.X509V1CertificateGenerator; + import java.util.Date; import java.util.HashMap; import java.util.Map; -import java.util.Properties; public class TestSSLUtils { @@ -66,36 +60,24 @@ public class TestSSLUtils { * @throws GeneralSecurityException thrown if an Security error ocurred. 
*/ public static X509Certificate generateCertificate(String dn, KeyPair pair, - int days, String algorithm) throws GeneralSecurityException, IOException { - PrivateKey privkey = pair.getPrivate(); - X509CertInfo info = new X509CertInfo(); + int days, String algorithm) + throws CertificateEncodingException, InvalidKeyException, IllegalStateException, + NoSuchProviderException, NoSuchAlgorithmException, SignatureException { Date from = new Date(); Date to = new Date(from.getTime() + days * 86400000L); - CertificateValidity interval = new CertificateValidity(from, to); BigInteger sn = new BigInteger(64, new SecureRandom()); - X500Name owner = new X500Name(dn); - - info.set(X509CertInfo.VALIDITY, interval); - info.set(X509CertInfo.SERIAL_NUMBER, new CertificateSerialNumber(sn)); - info.set(X509CertInfo.SUBJECT, new CertificateSubjectName(owner)); - info.set(X509CertInfo.ISSUER, new CertificateIssuerName(owner)); - info.set(X509CertInfo.KEY, new CertificateX509Key(pair.getPublic())); - info - .set(X509CertInfo.VERSION, new CertificateVersion(CertificateVersion.V3)); - AlgorithmId algo = new AlgorithmId(AlgorithmId.md5WithRSAEncryption_oid); - info.set(X509CertInfo.ALGORITHM_ID, new CertificateAlgorithmId(algo)); - - // Sign the cert to identify the algorithm that's used. - X509CertImpl cert = new X509CertImpl(info); - cert.sign(privkey, algorithm); - - // Update the algorith, and resign. - algo = (AlgorithmId) cert.get(X509CertImpl.SIG_ALG); - info - .set(CertificateAlgorithmId.NAME + "." + CertificateAlgorithmId.ALGORITHM, - algo); - cert = new X509CertImpl(info); - cert.sign(privkey, algorithm); + KeyPair keyPair = pair; + X509V1CertificateGenerator certGen = new X509V1CertificateGenerator(); + X500Principal dnName = new X500Principal(dn); + + certGen.setSerialNumber(sn); + certGen.setIssuerDN(dnName); + certGen.setNotBefore(from); + certGen.setNotAfter(to); + certGen.setSubjectDN(dnName); + certGen.setPublicKey(keyPair.getPublic()); + certGen.setSignatureAlgorithm(algorithm); + X509Certificate cert = certGen.generate(pair.getPrivate()); return cert; } @@ -168,41 +150,72 @@ public class TestSSLUtils { saveKeyStore(ks, filename, password); } - public static SecurityConfig createSSLConfigFile(SSLFactory.Mode mode, String trustStoreFileClient) throws IOException, GeneralSecurityException { - Properties securityConfigProps = new Properties(); + public static Map createX509Certificates(KeyPair keyPair) + throws GeneralSecurityException { Map certs = new HashMap(); - KeyPair keyPair = generateKeyPair("RSA"); X509Certificate cert = generateCertificate("CN=localhost, O=localhost", keyPair, 30, "SHA1withRSA"); - String password = "test"; - - if (mode == SSLFactory.Mode.SERVER) { - File keyStoreFile = File.createTempFile("keystore", ".jks"); - createKeyStore(keyStoreFile.getPath(), password, password, "localhost", keyPair.getPrivate(), cert); - certs.put("localhost", cert); - securityConfigProps.put(SecurityConfig.SSL_KEYSTORE_LOCATION_CONFIG, keyStoreFile.getPath()); - securityConfigProps.put(SecurityConfig.SSL_KEYSTORE_TYPE_CONFIG, "JKS"); - securityConfigProps.put(SecurityConfig.SSL_KEYMANAGER_ALGORITHM_CONFIG, "SunX509"); - securityConfigProps.put(SecurityConfig.SSL_KEYSTORE_PASSWORD_CONFIG, password); - securityConfigProps.put(SecurityConfig.SSL_KEY_PASSWORD_CONFIG, password); - - File trustStoreFile = File.createTempFile("truststore", ".jks"); - createTrustStore(trustStoreFile.getPath(), password, certs); - - securityConfigProps.put(SecurityConfig.SECURITY_PROTOCOL_CONFIG, "SSL"); - 
securityConfigProps.put(SecurityConfig.SSL_CLIENT_REQUIRE_CERT_CONFIG, "false"); - securityConfigProps.put(SecurityConfig.SSL_TRUSTSTORE_LOCATION_CONFIG, trustStoreFile.getPath()); - securityConfigProps.put(SecurityConfig.SSL_TRUSTSTORE_PASSWORD_CONFIG, password); - securityConfigProps.put(SecurityConfig.SSL_TRUSTSTORE_TYPE_CONFIG, "JKS"); - } else { - securityConfigProps.put(SecurityConfig.SECURITY_PROTOCOL_CONFIG, "SSL"); - securityConfigProps.put(SecurityConfig.SSL_CLIENT_REQUIRE_CERT_CONFIG, "false"); - securityConfigProps.put(SecurityConfig.SSL_TRUSTSTORE_LOCATION_CONFIG, trustStoreFileClient); - securityConfigProps.put(SecurityConfig.SSL_TRUSTSTORE_PASSWORD_CONFIG, password); - securityConfigProps.put(SecurityConfig.SSL_TRUSTSTORE_TYPE_CONFIG, "JKS"); + certs.put("localhost", cert); + return certs; + } + + public static Map createSSLConfig(SSLFactory.Mode mode, File keyStoreFile, String password, String keyPassword, + File trustStoreFile, String trustStorePassword, boolean useClientCert) { + Map sslConfigs = new HashMap(); + sslConfigs.put(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG, "SSL"); // kafka security protocol + sslConfigs.put(CommonClientConfigs.SSL_PROTOCOL_CONFIG, "TLS"); // protocol to create SSLContext + + if (mode == SSLFactory.Mode.SERVER || (mode == SSLFactory.Mode.CLIENT && keyStoreFile != null)) { + sslConfigs.put(CommonClientConfigs.SSL_KEYSTORE_LOCATION_CONFIG, keyStoreFile.getPath()); + sslConfigs.put(CommonClientConfigs.SSL_KEYSTORE_TYPE_CONFIG, "JKS"); + sslConfigs.put(CommonClientConfigs.SSL_KEYMANAGER_ALGORITHM_CONFIG, "SunX509"); + sslConfigs.put(CommonClientConfigs.SSL_KEYSTORE_PASSWORD_CONFIG, password); + sslConfigs.put(CommonClientConfigs.SSL_KEY_PASSWORD_CONFIG, keyPassword); + } + + sslConfigs.put(CommonClientConfigs.SSL_CLIENT_REQUIRE_CERT_CONFIG, useClientCert); + sslConfigs.put(CommonClientConfigs.SSL_TRUSTSTORE_LOCATION_CONFIG, trustStoreFile.getPath()); + sslConfigs.put(CommonClientConfigs.SSL_TRUSTSTORE_PASSWORD_CONFIG, trustStorePassword); + sslConfigs.put(CommonClientConfigs.SSL_TRUSTSTORE_TYPE_CONFIG, "JKS"); + sslConfigs.put(CommonClientConfigs.SSL_TRUSTMANAGER_ALGORITHM_CONFIG, "SunX509"); + return sslConfigs; + } + + public static Map> createSSLConfigs(boolean useClientCert, boolean trustStore) + throws IOException, GeneralSecurityException { + Map> sslConfigs = new HashMap>(); + Map certs = new HashMap(); + File trustStoreFile = File.createTempFile("truststore", ".jks"); + File clientKeyStoreFile = null; + File serverKeyStoreFile = File.createTempFile("serverKS", ".jks"); + String clientPassword = "ClientPassword"; + String serverPassword = "ServerPassword"; + String trustStorePassword = "TrustStorePassword"; + + if (useClientCert) { + clientKeyStoreFile = File.createTempFile("clientKS", ".jks"); + KeyPair cKP = generateKeyPair("RSA"); + X509Certificate cCert = generateCertificate("CN=localhost, O=client", cKP, 30, "SHA1withRSA"); + createKeyStore(clientKeyStoreFile.getPath(), clientPassword, "client", cKP.getPrivate(), cCert); + certs.put("client", cCert); + } + + KeyPair sKP = generateKeyPair("RSA"); + X509Certificate sCert = generateCertificate("CN=localhost, O=server", sKP, 30, + "SHA1withRSA"); + createKeyStore(serverKeyStoreFile.getPath(), serverPassword, serverPassword, "server", sKP.getPrivate(), sCert); + certs.put("server", sCert); + + if (trustStore) { + createTrustStore(trustStoreFile.getPath(), trustStorePassword, certs); } - securityConfigProps.put(SecurityConfig.SSL_TRUSTMANAGER_ALGORITHM_CONFIG, "SunX509"); - return new 
SecurityConfig(securityConfigProps); + Map clientSSLConfig = createSSLConfig(SSLFactory.Mode.CLIENT, clientKeyStoreFile, clientPassword, + clientPassword, trustStoreFile, trustStorePassword, useClientCert); + Map serverSSLConfig = createSSLConfig(SSLFactory.Mode.SERVER, serverKeyStoreFile, serverPassword, + serverPassword, trustStoreFile, trustStorePassword, useClientCert); + sslConfigs.put(SSLFactory.Mode.CLIENT, clientSSLConfig); + sslConfigs.put(SSLFactory.Mode.SERVER, serverSSLConfig); + return sslConfigs; } } -- 2.4.6 From 98a90ae9d80ea8f5ab4780569d1c4e301dd16c4e Mon Sep 17 00:00:00 2001 From: Sriharsha Chintalapani Date: Sun, 10 May 2015 23:18:13 -0700 Subject: [PATCH 03/30] KAFKA-1690. new java producer needs ssl support as a client. --- .../src/main/java/org/apache/kafka/clients/ClientUtils.java | 11 ----------- 1 file changed, 11 deletions(-) diff --git a/clients/src/main/java/org/apache/kafka/clients/ClientUtils.java b/clients/src/main/java/org/apache/kafka/clients/ClientUtils.java index 3657279..748576b 100644 --- a/clients/src/main/java/org/apache/kafka/clients/ClientUtils.java +++ b/clients/src/main/java/org/apache/kafka/clients/ClientUtils.java @@ -13,21 +13,17 @@ package org.apache.kafka.clients; import java.io.Closeable; -import java.io.IOException; import java.net.InetSocketAddress; import java.util.ArrayList; import java.util.List; -import java.util.Properties; import java.util.concurrent.atomic.AtomicReference; import org.apache.kafka.common.config.ConfigException; -import org.apache.kafka.common.config.SecurityConfig; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import static org.apache.kafka.common.utils.Utils.getHost; import static org.apache.kafka.common.utils.Utils.getPort; -import static org.apache.kafka.common.utils.Utils.loadProps; public class ClientUtils { @@ -67,11 +63,4 @@ public class ClientUtils { } } - public static SecurityConfig parseSecurityConfig(String securityConfigFile) throws IOException { - Properties securityProps = new Properties(); - if (securityConfigFile != null && securityConfigFile != "") { - securityProps = loadProps(securityConfigFile); - } - return new SecurityConfig(securityProps); - } } -- 2.4.6 From 804da7a015be2f98a1bb867ee5d42aa8009a37dd Mon Sep 17 00:00:00 2001 From: Sriharsha Chintalapani Date: Sun, 10 May 2015 23:31:25 -0700 Subject: [PATCH 04/30] KAFKA-1690. new java producer needs ssl support as a client. 
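With the separate security.config.file option gone, SSL settings are expected to travel through the ordinary client properties rather than a second file. A minimal sketch of a producer configured that way (the broker address, truststore path and passwords are placeholders; the property names are the ones defined by this patch series):

    import java.util.Properties;
    import org.apache.kafka.clients.producer.KafkaProducer;
    import org.apache.kafka.clients.producer.ProducerRecord;

    public class SSLProducerExample {
        public static void main(String[] args) {
            Properties props = new Properties();
            props.put("bootstrap.servers", "broker1:9093");   // placeholder broker address
            props.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
            props.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");
            props.put("security.protocol", "SSL");
            props.put("ssl.truststore.location", "/path/to/client.truststore.jks"); // placeholder path
            props.put("ssl.truststore.password", "test");      // placeholder password
            KafkaProducer<String, String> producer = new KafkaProducer<String, String>(props);
            producer.send(new ProducerRecord<String, String>("test-topic", "key", "value"));
            producer.close();
        }
    }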
--- .../main/java/org/apache/kafka/clients/consumer/ConsumerConfig.java | 5 ----- 1 file changed, 5 deletions(-) diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/ConsumerConfig.java b/clients/src/main/java/org/apache/kafka/clients/consumer/ConsumerConfig.java index 4ccd423..85fdc09 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/ConsumerConfig.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/ConsumerConfig.java @@ -157,11 +157,6 @@ public class ConsumerConfig extends AbstractConfig { public static final String VALUE_DESERIALIZER_CLASS_CONFIG = "value.deserializer"; private static final String VALUE_DESERIALIZER_CLASS_DOC = "Deserializer class for value that implements the Deserializer interface."; - /** security.config.file */ - public static final String SECURITY_CONFIG_FILE_CONFIG = CommonClientConfigs.SECURITY_CONFIG_FILE_CONFIG; - private static final String SECURITY_CONFIG_FILE_DOC = CommonClientConfigs.SECURITY_CONFIG_FILE_DOC; - - static { CONFIG = new ConfigDef().define(BOOTSTRAP_SERVERS_CONFIG, Type.LIST, -- 2.4.6 From ee16e8e6f92ac2baf0e41d3019b7f8aef39b1506 Mon Sep 17 00:00:00 2001 From: Sriharsha Chintalapani Date: Mon, 11 May 2015 16:09:01 -0700 Subject: [PATCH 05/30] KAFKA-1690. new java producer needs ssl support as a client. SSLFactory tests. --- .../kafka/clients/consumer/ConsumerConfig.java | 2 +- .../kafka/clients/producer/ProducerConfig.java | 2 +- .../apache/kafka/common/network/SSLFactory.java | 16 +++++-- .../kafka/common/network/SSLFactoryTest.java | 56 ++++++++++++++++++++++ .../java/org/apache/kafka/test/TestSSLUtils.java | 7 +++ 5 files changed, 76 insertions(+), 7 deletions(-) create mode 100644 clients/src/test/java/org/apache/kafka/common/network/SSLFactoryTest.java diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/ConsumerConfig.java b/clients/src/main/java/org/apache/kafka/clients/consumer/ConsumerConfig.java index 85fdc09..95f3a46 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/ConsumerConfig.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/ConsumerConfig.java @@ -280,7 +280,7 @@ public class ConsumerConfig extends AbstractConfig { VALUE_DESERIALIZER_CLASS_DOC) .define(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG, Type.STRING, SecurityProtocol.PLAINTEXT.toString(), Importance.MEDIUM, CommonClientConfigs.SECURITY_PROTOCOL_DOC) .define(CommonClientConfigs.SSL_PROTOCOL_CONFIG, Type.STRING, "TLS", Importance.MEDIUM, CommonClientConfigs.SSL_PROTOCOL_DOC) - .define(CommonClientConfigs.SSL_CIPHER_SUITES_CONFIG, Type.LIST, "", Importance.LOW, CommonClientConfigs.SSL_CIPHER_SUITES_DOC) + .define(CommonClientConfigs.SSL_CIPHER_SUITES_CONFIG, Type.LIST, Importance.LOW, CommonClientConfigs.SSL_CIPHER_SUITES_DOC, false) .define(CommonClientConfigs.SSL_ENABLED_PROTOCOLS_CONFIG, Type.LIST, "TLSv1.2, TLSv1.1, TLSv1", Importance.MEDIUM, CommonClientConfigs.SSL_ENABLED_PROTOCOLS_DOC) .define(CommonClientConfigs.SSL_KEYSTORE_TYPE_CONFIG, Type.STRING, "JKS", Importance.MEDIUM, CommonClientConfigs.SSL_KEYSTORE_TYPE_DOC) .define(CommonClientConfigs.SSL_KEYSTORE_LOCATION_CONFIG, Type.STRING, Importance.HIGH, CommonClientConfigs.SSL_KEYSTORE_LOCATION_DOC, false) diff --git a/clients/src/main/java/org/apache/kafka/clients/producer/ProducerConfig.java b/clients/src/main/java/org/apache/kafka/clients/producer/ProducerConfig.java index 69563ee..552cafb 100644 --- a/clients/src/main/java/org/apache/kafka/clients/producer/ProducerConfig.java +++ 
b/clients/src/main/java/org/apache/kafka/clients/producer/ProducerConfig.java @@ -223,7 +223,7 @@ public class ProducerConfig extends AbstractConfig { .define(VALUE_SERIALIZER_CLASS_CONFIG, Type.CLASS, Importance.HIGH, VALUE_SERIALIZER_CLASS_DOC) .define(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG, Type.STRING, SecurityProtocol.PLAINTEXT.toString(), Importance.MEDIUM, CommonClientConfigs.SECURITY_PROTOCOL_DOC) .define(CommonClientConfigs.SSL_PROTOCOL_CONFIG, Type.STRING, "TLS", Importance.MEDIUM, CommonClientConfigs.SSL_PROTOCOL_DOC) - .define(CommonClientConfigs.SSL_CIPHER_SUITES_CONFIG, Type.LIST, "", Importance.LOW, CommonClientConfigs.SSL_CIPHER_SUITES_DOC) + .define(CommonClientConfigs.SSL_CIPHER_SUITES_CONFIG, Type.LIST, Importance.LOW, CommonClientConfigs.SSL_CIPHER_SUITES_DOC, false) .define(CommonClientConfigs.SSL_ENABLED_PROTOCOLS_CONFIG, Type.LIST, "TLSv1.2, TLSv1.1, TLSv1", Importance.MEDIUM, CommonClientConfigs.SSL_ENABLED_PROTOCOLS_DOC) .define(CommonClientConfigs.SSL_KEYSTORE_TYPE_CONFIG, Type.STRING, "JKS", Importance.MEDIUM, CommonClientConfigs.SSL_KEYSTORE_TYPE_DOC) .define(CommonClientConfigs.SSL_KEYSTORE_LOCATION_CONFIG, Type.STRING, Importance.HIGH, CommonClientConfigs.SSL_KEYSTORE_LOCATION_DOC, false) diff --git a/clients/src/main/java/org/apache/kafka/common/network/SSLFactory.java b/clients/src/main/java/org/apache/kafka/common/network/SSLFactory.java index 73f976b..7fbb9d7 100644 --- a/clients/src/main/java/org/apache/kafka/common/network/SSLFactory.java +++ b/clients/src/main/java/org/apache/kafka/common/network/SSLFactory.java @@ -54,10 +54,17 @@ public class SSLFactory implements Configurable { @Override public void configure(Map configs) throws KafkaException { this.protocol = (String) configs.get(CommonClientConfigs.SSL_PROTOCOL_CONFIG); - if (configs.get(CommonClientConfigs.SSL_CIPHER_SUITES_CONFIG) != null) - this.cipherSuites = (String[]) ((List) configs.get(CommonClientConfigs.SSL_CIPHER_SUITES_CONFIG)).toArray(); - if (configs.get(CommonClientConfigs.SSL_ENABLED_PROTOCOLS_CONFIG) != null) - this.enabledProtocols = (String[]) ((List) configs.get(CommonClientConfigs.SSL_ENABLED_PROTOCOLS_CONFIG)).toArray(); + + if (configs.get(CommonClientConfigs.SSL_CIPHER_SUITES_CONFIG) != null) { + List cipherSuitesList = (List) configs.get(CommonClientConfigs.SSL_CIPHER_SUITES_CONFIG); + this.cipherSuites = (String[]) cipherSuitesList.toArray(new String[cipherSuitesList.size()]); + } + + if (configs.get(CommonClientConfigs.SSL_ENABLED_PROTOCOLS_CONFIG) != null) { + List enabledProtocolsList = (List) configs.get(CommonClientConfigs.SSL_ENABLED_PROTOCOLS_CONFIG); + this.enabledProtocols = (String[]) enabledProtocolsList.toArray(new String[enabledProtocolsList.size()]); + } + this.requireClientCert = (Boolean) configs.get(CommonClientConfigs.SSL_CLIENT_REQUIRE_CERT_CONFIG); this.kmfAlgorithm = (String) configs.get(CommonClientConfigs.SSL_KEYMANAGER_ALGORITHM_CONFIG); this.tmfAlgorithm = (String) configs.get(CommonClientConfigs.SSL_TRUSTMANAGER_ALGORITHM_CONFIG); @@ -73,7 +80,6 @@ public class SSLFactory implements Configurable { try { this.sslContext = createSSLContext(); } catch (Exception e) { - e.printStackTrace(); throw new KafkaException(e); } } diff --git a/clients/src/test/java/org/apache/kafka/common/network/SSLFactoryTest.java b/clients/src/test/java/org/apache/kafka/common/network/SSLFactoryTest.java new file mode 100644 index 0000000..02a3eff --- /dev/null +++ b/clients/src/test/java/org/apache/kafka/common/network/SSLFactoryTest.java @@ -0,0 +1,56 @@ +/** + * 
Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE + * file distributed with this work for additional information regarding copyright ownership. The ASF licenses this file + * to You under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the + * License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on + * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + */ +package org.apache.kafka.common.network; + +import javax.net.ssl.*; + +import java.util.Map; + +import org.apache.kafka.test.TestSSLUtils; + +import org.junit.Test; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; + + +/** + * A set of tests for the selector over ssl. These use a test harness that runs a simple socket server that echos back responses. + */ + +public class SSLFactoryTest { + + @Test + public void testSSLFactoryConfiguration() throws Exception { + Map> sslConfigs = TestSSLUtils.createSSLConfigs(false, true); + Map serverSSLConfig = sslConfigs.get(SSLFactory.Mode.SERVER); + SSLFactory sslFactory = new SSLFactory(SSLFactory.Mode.SERVER); + sslFactory.configure(serverSSLConfig); + SSLEngine engine = sslFactory.createSSLEngine("localhost", 9093); + assertNotNull(engine); + String[] expectedProtocols = {"TLSv1.2"}; + assertEquals(expectedProtocols, engine.getEnabledProtocols()); + assertEquals(false, engine.getUseClientMode()); + } + + @Test + public void testClientMode() throws Exception { + Map> sslConfigs = TestSSLUtils.createSSLConfigs(false, true); + Map clientSSLConfig = sslConfigs.get(SSLFactory.Mode.CLIENT); + SSLFactory sslFactory = new SSLFactory(SSLFactory.Mode.CLIENT); + sslFactory.configure(clientSSLConfig); + SSLEngine engine = sslFactory.createSSLEngine("localhost", 9093); + assertTrue(engine.getUseClientMode()); + } + +} diff --git a/clients/src/test/java/org/apache/kafka/test/TestSSLUtils.java b/clients/src/test/java/org/apache/kafka/test/TestSSLUtils.java index bfb52ef..590f1f5 100644 --- a/clients/src/test/java/org/apache/kafka/test/TestSSLUtils.java +++ b/clients/src/test/java/org/apache/kafka/test/TestSSLUtils.java @@ -43,6 +43,8 @@ import org.bouncycastle.x509.X509V1CertificateGenerator; import java.util.Date; import java.util.HashMap; import java.util.Map; +import java.util.List; +import java.util.ArrayList; public class TestSSLUtils { @@ -177,6 +179,11 @@ public class TestSSLUtils { sslConfigs.put(CommonClientConfigs.SSL_TRUSTSTORE_PASSWORD_CONFIG, trustStorePassword); sslConfigs.put(CommonClientConfigs.SSL_TRUSTSTORE_TYPE_CONFIG, "JKS"); sslConfigs.put(CommonClientConfigs.SSL_TRUSTMANAGER_ALGORITHM_CONFIG, "SunX509"); + + List enabledProtocols = new ArrayList(); + enabledProtocols.add("TLSv1.2"); + sslConfigs.put(CommonClientConfigs.SSL_ENABLED_PROTOCOLS_CONFIG, enabledProtocols); + return sslConfigs; } -- 2.4.6 From 2dd826be4a6ebe7064cb19ff21fe23950a1bafc2 Mon Sep 17 00:00:00 2001 From: Sriharsha Chintalapani Date: Tue, 12 May 2015 16:09:38 -0700 Subject: [PATCH 06/30] KAFKA-1690. new java producer needs ssl support as a client. Added PrincipalBuilder. 
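The PrincipalBuilder hook lets a deployment derive its own Principal from the transport layer instead of using the default. A sketch of a custom implementation against the interface as added in this patch (the class name and the CN-trimming rule are invented purely for illustration):

    import java.security.Principal;
    import org.apache.kafka.common.KafkaException;
    import org.apache.kafka.common.network.Authenticator;
    import org.apache.kafka.common.network.TransportLayer;
    import org.apache.kafka.common.security.auth.KafkaPrincipal;
    import org.apache.kafka.common.security.auth.PrincipalBuilder;

    public class CNOnlyPrincipalBuilder implements PrincipalBuilder {
        // Keep only the part of the peer's distinguished name before the first comma,
        // e.g. "CN=localhost, O=client" becomes "CN=localhost".
        public Principal buildPrincipal(TransportLayer transportLayer, Authenticator authenticator) throws KafkaException {
            try {
                String name = transportLayer.peerPrincipal().getName();
                int comma = name.indexOf(',');
                return new KafkaPrincipal(comma < 0 ? name : name.substring(0, comma));
            } catch (Exception e) {
                throw new KafkaException("Failed to build principal", e);
            }
        }
    }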
--- checkstyle/checkstyle.xml | 25 +++++----- checkstyle/import-control.xml | 17 ++++--- .../apache/kafka/common/network/Authenticator.java | 18 +++----- .../org/apache/kafka/common/network/Channel.java | 7 ++- .../kafka/common/network/DefaultAuthenticator.java | 21 ++++++--- .../common/network/PlainTextTransportLayer.java | 17 ++++--- .../kafka/common/network/SSLTransportLayer.java | 11 ++++- .../org/apache/kafka/common/network/Selector.java | 5 +- .../kafka/common/network/TransportLayer.java | 15 +++++- .../security/auth/DefaultPrincipalBuilder.java | 37 +++++++++++++++ .../kafka/common/security/auth/KafkaPrincipal.java | 54 ++++++++++++++++++++++ .../common/security/auth/PrincipalBuilder.java | 38 +++++++++++++++ 12 files changed, 212 insertions(+), 53 deletions(-) create mode 100644 clients/src/main/java/org/apache/kafka/common/security/auth/DefaultPrincipalBuilder.java create mode 100644 clients/src/main/java/org/apache/kafka/common/security/auth/KafkaPrincipal.java create mode 100644 clients/src/main/java/org/apache/kafka/common/security/auth/PrincipalBuilder.java diff --git a/checkstyle/checkstyle.xml b/checkstyle/checkstyle.xml index 5fbf562..5031415 100644 --- a/checkstyle/checkstyle.xml +++ b/checkstyle/checkstyle.xml @@ -1,6 +1,6 @@ +--> - + - + - + - + + - + @@ -59,12 +60,12 @@ - + - + @@ -79,4 +80,4 @@ - \ No newline at end of file + diff --git a/checkstyle/import-control.xml b/checkstyle/import-control.xml index 339c620..a921757 100644 --- a/checkstyle/import-control.xml +++ b/checkstyle/import-control.xml @@ -26,7 +26,6 @@ - @@ -41,13 +40,9 @@ - - - - @@ -57,12 +52,17 @@ + + + + - - - + + + + @@ -91,7 +91,6 @@ - diff --git a/clients/src/main/java/org/apache/kafka/common/network/Authenticator.java b/clients/src/main/java/org/apache/kafka/common/network/Authenticator.java index 1b6b32a..920ed88 100644 --- a/clients/src/main/java/org/apache/kafka/common/network/Authenticator.java +++ b/clients/src/main/java/org/apache/kafka/common/network/Authenticator.java @@ -22,39 +22,33 @@ package org.apache.kafka.common.network; */ import java.io.IOException; -import com.sun.security.auth.UserPrincipal; +import java.security.Principal; +import org.apache.kafka.common.KafkaException; public interface Authenticator { /** - * Closes this channel + * Closes this Authenticator * * @throws IOException if any I/O error occurs */ void close() throws IOException; /** - * - * @throws IOException - */ - void init() throws IOException; - - /** - * Returns UserPrincipal after authentication is established + * Returns Principal after authentication is established */ - UserPrincipal userPrincipal() throws IOException; - + Principal principal() throws KafkaException; /** * Does authentication and returns SelectionKey.OP if further communication needed + * If no further authentication needs to be done return 0. 
*/ int authenticate(boolean read, boolean write) throws IOException; /** * returns true if authentication is complete otherwise returns false; */ - boolean isComplete(); } diff --git a/clients/src/main/java/org/apache/kafka/common/network/Channel.java b/clients/src/main/java/org/apache/kafka/common/network/Channel.java index d9d1192..7b2489e 100644 --- a/clients/src/main/java/org/apache/kafka/common/network/Channel.java +++ b/clients/src/main/java/org/apache/kafka/common/network/Channel.java @@ -27,7 +27,7 @@ import java.nio.channels.ScatteringByteChannel; import java.nio.channels.GatheringByteChannel; import java.nio.channels.SocketChannel; -import com.sun.security.auth.UserPrincipal; +import java.security.Principal; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -45,7 +45,6 @@ public class Channel implements ScatteringByteChannel, GatheringByteChannel { public Channel(TransportLayer transportLayer, Authenticator authenticator) throws IOException { this.transportLayer = transportLayer; this.authenticator = authenticator; - this.authenticator.init(); } public void close() throws IOException { @@ -58,8 +57,8 @@ public class Channel implements ScatteringByteChannel, GatheringByteChannel { * Incase of PLAINTEXT and No Authentication returns ANONYMOUS as the userPrincipal * If SSL used without any SASL Authentication returns SSLSession.peerPrincipal */ - public UserPrincipal userPrincipal() throws IOException { - return authenticator.userPrincipal(); + public Principal principal() throws IOException { + return authenticator.principal(); } public int connect(boolean read, boolean write) throws IOException { diff --git a/clients/src/main/java/org/apache/kafka/common/network/DefaultAuthenticator.java b/clients/src/main/java/org/apache/kafka/common/network/DefaultAuthenticator.java index 97b1135..3a66e7b 100644 --- a/clients/src/main/java/org/apache/kafka/common/network/DefaultAuthenticator.java +++ b/clients/src/main/java/org/apache/kafka/common/network/DefaultAuthenticator.java @@ -17,25 +17,34 @@ package org.apache.kafka.common.network; -import com.sun.security.auth.UserPrincipal; +import java.security.Principal; import java.io.IOException; +import org.apache.kafka.common.security.auth.PrincipalBuilder; +import org.apache.kafka.common.KafkaException; + public class DefaultAuthenticator implements Authenticator { TransportLayer transportLayer; + PrincipalBuilder principalBuilder; + Principal principal; - public DefaultAuthenticator(TransportLayer transportLayer) { + public DefaultAuthenticator(TransportLayer transportLayer, PrincipalBuilder principalBuilder) { this.transportLayer = transportLayer; + this.principalBuilder = principalBuilder; } - public void init() {} - + /* + * No-Op for default authenticator + */ public int authenticate(boolean read, boolean write) throws IOException { return 0; } - public UserPrincipal userPrincipal() throws IOException { - return new UserPrincipal(transportLayer.getPeerPrincipal().toString()); + public Principal principal() throws KafkaException { + if (principal != null) + principal = principalBuilder.buildPrincipal(transportLayer, this); + return principal; } public void close() throws IOException {} diff --git a/clients/src/main/java/org/apache/kafka/common/network/PlainTextTransportLayer.java b/clients/src/main/java/org/apache/kafka/common/network/PlainTextTransportLayer.java index dbf0a30..17b014f 100644 --- a/clients/src/main/java/org/apache/kafka/common/network/PlainTextTransportLayer.java +++ 
b/clients/src/main/java/org/apache/kafka/common/network/PlainTextTransportLayer.java @@ -22,14 +22,15 @@ package org.apache.kafka.common.network; */ import java.io.IOException; -import java.nio.ByteBuffer; -import java.nio.channels.SocketChannel; - import java.io.DataInputStream; import java.io.DataOutputStream; +import java.nio.ByteBuffer; +import java.nio.channels.SocketChannel; +import javax.net.ssl.SSLSession; import java.security.Principal; -import com.sun.security.auth.UserPrincipal; + +import org.apache.kafka.common.security.auth.KafkaPrincipal; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -140,8 +141,12 @@ public class PlainTextTransportLayer implements TransportLayer { return outStream; } - public Principal getPeerPrincipal() throws IOException { - return new UserPrincipal("ANONYMOUS"); + public Principal peerPrincipal() throws IOException { + return new KafkaPrincipal("ANONYMOUS"); + } + + public SSLSession sslSession() throws IllegalStateException, UnsupportedOperationException { + throw new UnsupportedOperationException("sslSession not supported for PlainTextTransportLayer"); } } diff --git a/clients/src/main/java/org/apache/kafka/common/network/SSLTransportLayer.java b/clients/src/main/java/org/apache/kafka/common/network/SSLTransportLayer.java index 2d6a519..0267e85 100644 --- a/clients/src/main/java/org/apache/kafka/common/network/SSLTransportLayer.java +++ b/clients/src/main/java/org/apache/kafka/common/network/SSLTransportLayer.java @@ -28,6 +28,7 @@ import javax.net.ssl.SSLEngineResult; import javax.net.ssl.SSLEngineResult.HandshakeStatus; import javax.net.ssl.SSLEngineResult.Status; import javax.net.ssl.SSLException; +import javax.net.ssl.SSLSession; import javax.net.ssl.SSLPeerUnverifiedException; import java.io.DataInputStream; @@ -267,7 +268,7 @@ public class SSLTransportLayer implements TransportLayer { sslEngine.closeOutbound(); if (!flush(netOutBuffer)) { - throw new IOException("Remaining data in the network buffer, can't send SSL close message, force a close with close(true) instead"); + throw new IOException("Remaining data in the network buffer, can't send SSL close message."); } //prep the buffer for the close message netOutBuffer.clear(); @@ -400,7 +401,7 @@ public class SSLTransportLayer implements TransportLayer { return outStream; } - public Principal getPeerPrincipal() throws IOException { + public Principal peerPrincipal() throws IOException { try { return sslEngine.getSession().getPeerPrincipal(); } catch (SSLPeerUnverifiedException se) { @@ -408,6 +409,12 @@ public class SSLTransportLayer implements TransportLayer { } } + public SSLSession sslSession() throws IllegalStateException, UnsupportedOperationException { + if (!handshakeComplete) + throw new IllegalStateException("Handshake incomplete."); + return sslEngine.getSession(); + } + private int readFromAppBuffer(ByteBuffer dst) { appReadBuffer.flip(); try { diff --git a/clients/src/main/java/org/apache/kafka/common/network/Selector.java b/clients/src/main/java/org/apache/kafka/common/network/Selector.java index 90e2cee..ea95858 100644 --- a/clients/src/main/java/org/apache/kafka/common/network/Selector.java +++ b/clients/src/main/java/org/apache/kafka/common/network/Selector.java @@ -43,6 +43,8 @@ import org.apache.kafka.common.metrics.stats.Count; import org.apache.kafka.common.metrics.stats.Max; import org.apache.kafka.common.metrics.stats.Rate; import org.apache.kafka.common.protocol.SecurityProtocol; +import org.apache.kafka.common.security.auth.PrincipalBuilder; +import 
org.apache.kafka.common.security.auth.DefaultPrincipalBuilder; import org.apache.kafka.common.utils.Time; import org.apache.kafka.clients.CommonClientConfigs; import org.slf4j.Logger; @@ -169,7 +171,8 @@ public class Selector implements Selectable { } else { transportLayer = new PlainTextTransportLayer(socketChannel); } - Authenticator authenticator = new DefaultAuthenticator(transportLayer); + PrincipalBuilder principalBuilder = new DefaultPrincipalBuilder(); + Authenticator authenticator = new DefaultAuthenticator(transportLayer, principalBuilder); Channel channel = new Channel(transportLayer, authenticator); SelectionKey key = socketChannel.register(this.selector, SelectionKey.OP_CONNECT); key.attach(new Transmissions(id)); diff --git a/clients/src/main/java/org/apache/kafka/common/network/TransportLayer.java b/clients/src/main/java/org/apache/kafka/common/network/TransportLayer.java index ae10f7c..0531d6f 100644 --- a/clients/src/main/java/org/apache/kafka/common/network/TransportLayer.java +++ b/clients/src/main/java/org/apache/kafka/common/network/TransportLayer.java @@ -27,6 +27,7 @@ import java.nio.channels.SocketChannel; import java.io.DataInputStream; import java.io.DataOutputStream; +import javax.net.ssl.SSLSession; import java.security.Principal; @@ -82,5 +83,17 @@ public interface TransportLayer { boolean flush(ByteBuffer buffer) throws IOException; - Principal getPeerPrincipal() throws IOException; + + /** + * returns SSLSession.getPeerPrinicpal if SSLTransportLayer used + * for non-secure returns a "ANONYMOUS" as the peerPrincipal + */ + Principal peerPrincipal() throws IOException; + + /** + * returns a SSL Session after the handshake is established + * throws IlleagalStateException if the handshake is not established + * throws UnsupportedOperationException for non-secure implementation + */ + SSLSession sslSession() throws IllegalStateException, UnsupportedOperationException; } diff --git a/clients/src/main/java/org/apache/kafka/common/security/auth/DefaultPrincipalBuilder.java b/clients/src/main/java/org/apache/kafka/common/security/auth/DefaultPrincipalBuilder.java new file mode 100644 index 0000000..d594e4d --- /dev/null +++ b/clients/src/main/java/org/apache/kafka/common/security/auth/DefaultPrincipalBuilder.java @@ -0,0 +1,37 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.kafka.common.security.auth; + +import java.security.Principal; + +import org.apache.kafka.common.network.TransportLayer; +import org.apache.kafka.common.network.Authenticator; +import org.apache.kafka.common.KafkaException; + + +public class DefaultPrincipalBuilder implements PrincipalBuilder { + + public Principal buildPrincipal(TransportLayer transportLayer, Authenticator authenticator) throws KafkaException { + try { + return transportLayer.peerPrincipal(); + } catch (Exception e) { + throw new KafkaException("Failed to build principal due to: ", e); + } + } + +} diff --git a/clients/src/main/java/org/apache/kafka/common/security/auth/KafkaPrincipal.java b/clients/src/main/java/org/apache/kafka/common/security/auth/KafkaPrincipal.java new file mode 100644 index 0000000..4a7ace8 --- /dev/null +++ b/clients/src/main/java/org/apache/kafka/common/security/auth/KafkaPrincipal.java @@ -0,0 +1,54 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.kafka.common.security.auth; + +import java.security.Principal; + +public class KafkaPrincipal implements Principal { + private final String name; + + public KafkaPrincipal(String name) { + if (name == null) + throw new IllegalArgumentException("name is null"); + this.name = name; + } + + public boolean equals(Object object) { + if (this == object) + return true; + + if (object instanceof KafkaPrincipal) { + return name.equals(((KafkaPrincipal) object).getName()); + } + + return false; + } + + public int hashCode() { + return name.hashCode(); + } + + public String getName() { + return name; + } + + public String toString() { + return name; + } + +} diff --git a/clients/src/main/java/org/apache/kafka/common/security/auth/PrincipalBuilder.java b/clients/src/main/java/org/apache/kafka/common/security/auth/PrincipalBuilder.java new file mode 100644 index 0000000..5b39222 --- /dev/null +++ b/clients/src/main/java/org/apache/kafka/common/security/auth/PrincipalBuilder.java @@ -0,0 +1,38 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.kafka.common.security.auth; + +/* + * PrincipalBuilder for Authenticator + */ +import org.apache.kafka.common.network.TransportLayer; +import org.apache.kafka.common.network.Authenticator; +import org.apache.kafka.common.KafkaException; + +import java.security.Principal; + +public interface PrincipalBuilder { + + /** + * Returns Principal + * @param TransportLayer + * @param Authenticator + */ + Principal buildPrincipal(TransportLayer transportLayer, Authenticator authenticator) throws KafkaException; + +} -- 2.4.6 From 2cddad80f6a4a961b6932879448e532dab4e637e Mon Sep 17 00:00:00 2001 From: Sriharsha Chintalapani Date: Fri, 15 May 2015 07:17:37 -0700 Subject: [PATCH 07/30] KAFKA-1690. new java producer needs ssl support as a client. Addressing reviews. --- build.gradle | 4 +- checkstyle/import-control.xml | 15 +- .../java/org/apache/kafka/clients/ClientUtils.java | 25 ++ .../apache/kafka/clients/CommonClientConfigs.java | 55 --- .../kafka/clients/consumer/ConsumerConfig.java | 33 +- .../kafka/clients/consumer/KafkaConsumer.java | 5 +- .../kafka/clients/producer/KafkaProducer.java | 7 +- .../kafka/clients/producer/ProducerConfig.java | 32 +- .../kafka/common/config/SecurityConfigs.java | 106 ++++++ .../apache/kafka/common/network/Authenticator.java | 8 +- .../org/apache/kafka/common/network/Channel.java | 48 ++- .../kafka/common/network/ChannelBuilder.java | 43 +++ .../kafka/common/network/DefaultAuthenticator.java | 24 +- .../common/network/PlainTextChannelBuilder.java | 57 +++ .../common/network/PlainTextTransportLayer.java | 63 ++-- .../kafka/common/network/SSLChannelBuilder.java | 67 ++++ .../apache/kafka/common/network/SSLFactory.java | 95 ++--- .../kafka/common/network/SSLTransportLayer.java | 401 +++++++++++++-------- .../org/apache/kafka/common/network/Selector.java | 91 ++--- .../kafka/common/network/TransportLayer.java | 51 +-- .../kafka/common/protocol/SecurityProtocol.java | 4 +- .../security/auth/DefaultPrincipalBuilder.java | 6 + .../kafka/common/security/auth/KafkaPrincipal.java | 4 + .../common/security/auth/PrincipalBuilder.java | 15 +- .../java/org/apache/kafka/common/utils/Utils.java | 42 ++- .../apache/kafka/common/network/EchoServer.java | 21 +- .../kafka/common/network/SSLFactoryTest.java | 17 +- .../kafka/common/network/SSLSelectorTest.java | 240 +++++++++++- .../apache/kafka/common/network/SelectorTest.java | 42 ++- .../java/org/apache/kafka/test/TestSSLUtils.java | 106 +++--- 30 files changed, 1208 insertions(+), 519 deletions(-) create mode 100644 clients/src/main/java/org/apache/kafka/common/config/SecurityConfigs.java create mode 100644 clients/src/main/java/org/apache/kafka/common/network/ChannelBuilder.java create mode 100644 clients/src/main/java/org/apache/kafka/common/network/PlainTextChannelBuilder.java create mode 100644 clients/src/main/java/org/apache/kafka/common/network/SSLChannelBuilder.java diff --git a/build.gradle b/build.gradle index 4e83d7d..3633152 100644 --- a/build.gradle +++ b/build.gradle @@ -354,8 +354,8 @@ project(':clients') { compile "org.slf4j:slf4j-api:1.7.6" compile 'org.xerial.snappy:snappy-java:1.1.1.6' compile 'net.jpountz.lz4:lz4:1.2.0' - compile 'org.bouncycastle:bcprov-jdk16:1.46' + testCompile 'org.bouncycastle:bcpkix-jdk15on:1.52' testCompile 'com.novocode:junit-interface:0.9' testRuntime "$slf4jlog4j" } @@ -385,7 +385,7 @@ project(':clients') { } checkstyle { - configFile = new File(rootDir, 
"checkstyle/checkstyle.xml") + configFile = new File(rootDir, "checkstyle/checkstyle.xml") } test.dependsOn('checkstyleMain', 'checkstyleTest') } diff --git a/checkstyle/import-control.xml b/checkstyle/import-control.xml index a921757..1ebe211 100644 --- a/checkstyle/import-control.xml +++ b/checkstyle/import-control.xml @@ -52,16 +52,15 @@ - - - - + + + - - - + + + @@ -108,7 +107,7 @@ - + diff --git a/clients/src/main/java/org/apache/kafka/clients/ClientUtils.java b/clients/src/main/java/org/apache/kafka/clients/ClientUtils.java index 748576b..3e92d6d 100644 --- a/clients/src/main/java/org/apache/kafka/clients/ClientUtils.java +++ b/clients/src/main/java/org/apache/kafka/clients/ClientUtils.java @@ -16,8 +16,14 @@ import java.io.Closeable; import java.net.InetSocketAddress; import java.util.ArrayList; import java.util.List; +import java.util.Map; import java.util.concurrent.atomic.AtomicReference; +import org.apache.kafka.common.protocol.SecurityProtocol; +import org.apache.kafka.common.network.ChannelBuilder; +import org.apache.kafka.common.network.SSLChannelBuilder; +import org.apache.kafka.common.network.PlainTextChannelBuilder; +import org.apache.kafka.common.config.SecurityConfigs; import org.apache.kafka.common.config.ConfigException; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -63,4 +69,23 @@ public class ClientUtils { } } + /** + * @param configs client/server configs + * returns ChannelBuilder configured channelBuilder based on the configs. + */ + public static ChannelBuilder createChannelBuilder(Map configs) { + ChannelBuilder channelBuilder = null; + SecurityProtocol securityProtocol = configs.containsKey(SecurityConfigs.SECURITY_PROTOCOL_CONFIG) ? + SecurityProtocol.valueOf((String) configs.get(SecurityConfigs.SECURITY_PROTOCOL_CONFIG)) : SecurityProtocol.PLAINTEXT; + + if (securityProtocol == SecurityProtocol.SSL) { + channelBuilder = new SSLChannelBuilder(); + } else { + channelBuilder = new PlainTextChannelBuilder(); + } + + channelBuilder.configure(configs); + return channelBuilder; + } + } diff --git a/clients/src/main/java/org/apache/kafka/clients/CommonClientConfigs.java b/clients/src/main/java/org/apache/kafka/clients/CommonClientConfigs.java index ead3826..16507c7 100644 --- a/clients/src/main/java/org/apache/kafka/clients/CommonClientConfigs.java +++ b/clients/src/main/java/org/apache/kafka/clients/CommonClientConfigs.java @@ -55,59 +55,4 @@ public class CommonClientConfigs { public static final String METRIC_REPORTER_CLASSES_CONFIG = "metric.reporters"; public static final String METRIC_REPORTER_CLASSES_DOC = "A list of classes to use as metrics reporters. Implementing the MetricReporter interface allows plugging in classes that will be notified of new metric creation. The JmxReporter is always included to register JMX statistics."; - public static final String SECURITY_CONFIG_FILE_CONFIG = "security.config.file"; - public static final String SECURITY_CONFIG_FILE_DOC = "Kafka client security related config file."; - - public static final String SECURITY_PROTOCOL_CONFIG = "security.protocol"; - public static final String SECURITY_PROTOCOL_DOC = "Protocol used to communicate with brokers. Currently only PLAINTEXT and SSL are supported."; - - public static final String SSL_PROTOCOL_CONFIG = "ssl.protocol"; - public static final String SSL_PROTOCOL_DOC = "The ssl protocol used to generate SSLContext." - + "Default setting is TLS. 
Allowed values are SSL, SSLv2, SSLv3, TLS, TLSv1.1, TLSv1.2"; - - public static final String SSL_CIPHER_SUITES_CONFIG = "ssl.cipher.suites"; - public static final String SSL_CIPHER_SUITES_DOC = "A cipher suite is a named combination of authentication, encryption, MAC and key exchange algorithm used to negotiate the security settings for a network connection using TLS or SSL network protocol." - + "By default all the available cipher suites are supported."; - - public static final String SSL_ENABLED_PROTOCOLS_CONFIG = "ssl.enabled.protocols"; - public static final String SSL_ENABLED_PROTOCOLS_DOC = "The list of protocols enabled for SSL connections. " - + "All versions of TLS is enabled by default."; - - public static final String SSL_KEYSTORE_TYPE_CONFIG = "ssl.keystore.type"; - public static final String SSL_KEYSTORE_TYPE_DOC = "The file format of the key store file. " - + "This is optional for client. Default value is JKS"; - - public static final String SSL_KEYSTORE_LOCATION_CONFIG = "ssl.keystore.location"; - public static final String SSL_KEYSTORE_LOCATION_DOC = "The location of the key store file. " - + "This is optional for Client and can be used for two-way authentication for client."; - - public static final String SSL_KEYSTORE_PASSWORD_CONFIG = "ssl.keystore.password"; - public static final String SSL_KEYSTORE_PASSWORD_DOC = "The store password for the key store file. "; - - public static final String SSL_KEY_PASSWORD_CONFIG = "ssl.key.password"; - public static final String SSL_KEY_PASSWORD_DOC = "The password of the private key in the key store file. " - + "This is optional for client."; - - public static final String SSL_TRUSTSTORE_TYPE_CONFIG = "ssl.truststore.type"; - public static final String SSL_TRUSTSTORE_TYPE_DOC = "The file format of the trust store file. " - + "Default value is JKS."; - - public static final String SSL_TRUSTSTORE_LOCATION_CONFIG = "ssl.truststore.location"; - public static final String SSL_TRUSTSTORE_LOCATION_DOC = "The location of the trust store file. "; - - public static final String SSL_TRUSTSTORE_PASSWORD_CONFIG = "ssl.truststore.password"; - public static final String SSL_TRUSTSTORE_PASSWORD_DOC = "The password for the trust store file. "; - - public static final String SSL_CLIENT_REQUIRE_CERT_CONFIG = "ssl.client.require.cert"; - public static final String SSL_CLIENT_REQUIRE_CERT_DOC = "This is to enforce two-way authentication between client and server." - + "Default value is false. If set to true client need to prover Keystrore releated config"; - - public static final String SSL_KEYMANAGER_ALGORITHM_CONFIG = "ssl.keymanager.algorithm"; - public static final String SSL_KEYMANAGER_ALGORITHM_DOC = "The algorithm used by key manager factory for SSL connections. " - + "Default value is the key manager factory algorithm configured for the Java Virtual Machine."; - - public static final String SSL_TRUSTMANAGER_ALGORITHM_CONFIG = "ssl.trustmanager.algorithm"; - public static final String SSL_TRUSTMANAGER_ALGORITHM_DOC = "The algorithm used by trust manager factory for SSL connections. 
" - + "Default value is the trust manager factory algorithm configured for the Java Virtual Machine."; - } diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/ConsumerConfig.java b/clients/src/main/java/org/apache/kafka/clients/consumer/ConsumerConfig.java index 95f3a46..dff4258 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/ConsumerConfig.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/ConsumerConfig.java @@ -19,7 +19,7 @@ import org.apache.kafka.common.config.ConfigDef; import org.apache.kafka.common.config.ConfigDef.Importance; import org.apache.kafka.common.config.ConfigDef.Type; import org.apache.kafka.common.serialization.Deserializer; -import org.apache.kafka.common.protocol.SecurityProtocol; +import org.apache.kafka.common.config.SecurityConfigs; import java.util.HashMap; import java.util.Map; @@ -278,20 +278,23 @@ public class ConsumerConfig extends AbstractConfig { Type.CLASS, Importance.HIGH, VALUE_DESERIALIZER_CLASS_DOC) - .define(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG, Type.STRING, SecurityProtocol.PLAINTEXT.toString(), Importance.MEDIUM, CommonClientConfigs.SECURITY_PROTOCOL_DOC) - .define(CommonClientConfigs.SSL_PROTOCOL_CONFIG, Type.STRING, "TLS", Importance.MEDIUM, CommonClientConfigs.SSL_PROTOCOL_DOC) - .define(CommonClientConfigs.SSL_CIPHER_SUITES_CONFIG, Type.LIST, Importance.LOW, CommonClientConfigs.SSL_CIPHER_SUITES_DOC, false) - .define(CommonClientConfigs.SSL_ENABLED_PROTOCOLS_CONFIG, Type.LIST, "TLSv1.2, TLSv1.1, TLSv1", Importance.MEDIUM, CommonClientConfigs.SSL_ENABLED_PROTOCOLS_DOC) - .define(CommonClientConfigs.SSL_KEYSTORE_TYPE_CONFIG, Type.STRING, "JKS", Importance.MEDIUM, CommonClientConfigs.SSL_KEYSTORE_TYPE_DOC) - .define(CommonClientConfigs.SSL_KEYSTORE_LOCATION_CONFIG, Type.STRING, Importance.HIGH, CommonClientConfigs.SSL_KEYSTORE_LOCATION_DOC, false) - .define(CommonClientConfigs.SSL_KEYSTORE_PASSWORD_CONFIG, Type.STRING, Importance.HIGH, CommonClientConfigs.SSL_KEYSTORE_PASSWORD_DOC, false) - .define(CommonClientConfigs.SSL_KEY_PASSWORD_CONFIG, Type.STRING, Importance.HIGH, CommonClientConfigs.SSL_KEY_PASSWORD_DOC, false) - .define(CommonClientConfigs.SSL_TRUSTSTORE_TYPE_CONFIG, Type.STRING, "JKS", Importance.MEDIUM, CommonClientConfigs.SSL_TRUSTSTORE_TYPE_DOC) - .define(CommonClientConfigs.SSL_TRUSTSTORE_LOCATION_CONFIG, Type.STRING, Importance.HIGH, CommonClientConfigs.SSL_TRUSTSTORE_LOCATION_DOC, false) - .define(CommonClientConfigs.SSL_TRUSTSTORE_PASSWORD_CONFIG, Type.STRING, Importance.HIGH, CommonClientConfigs.SSL_TRUSTSTORE_PASSWORD_DOC, false) - .define(CommonClientConfigs.SSL_KEYMANAGER_ALGORITHM_CONFIG, Type.STRING, "SunX509", Importance.LOW, CommonClientConfigs.SSL_KEYMANAGER_ALGORITHM_DOC) - .define(CommonClientConfigs.SSL_TRUSTMANAGER_ALGORITHM_CONFIG, Type.STRING, "SunX509", Importance.LOW, CommonClientConfigs.SSL_TRUSTMANAGER_ALGORITHM_DOC) - .define(CommonClientConfigs.SSL_CLIENT_REQUIRE_CERT_CONFIG, Type.BOOLEAN, false, Importance.MEDIUM, CommonClientConfigs.SSL_CLIENT_REQUIRE_CERT_DOC); + .define(SecurityConfigs.SECURITY_PROTOCOL_CONFIG, Type.STRING, SecurityConfigs.DEFAULT_SECURITY_PROTOCOL, Importance.MEDIUM, SecurityConfigs.SECURITY_PROTOCOL_DOC) + .define(SecurityConfigs.PRINCIPAL_BUILDER_CLASS_CONFIG, Type.CLASS, SecurityConfigs.DEFAULT_PRINCIPAL_BUILDER_CLASS, Importance.LOW, SecurityConfigs.PRINCIPAL_BUILDER_CLASS_DOC) + .define(SecurityConfigs.SSL_PROTOCOL_CONFIG, Type.STRING, SecurityConfigs.DEFAULT_SSL_PROTOCOL, Importance.MEDIUM, 
SecurityConfigs.SSL_PROTOCOL_DOC) + .define(SecurityConfigs.SSL_PROVIDER_CONFIG, Type.STRING, Importance.MEDIUM, SecurityConfigs.SSL_PROVIDER_DOC, false) + .define(SecurityConfigs.SSL_CIPHER_SUITES_CONFIG, Type.LIST, Importance.LOW, SecurityConfigs.SSL_CIPHER_SUITES_DOC, false) + .define(SecurityConfigs.SSL_ENABLED_PROTOCOLS_CONFIG, Type.LIST, SecurityConfigs.DEFAULT_ENABLED_PROTOCOLS, Importance.MEDIUM, SecurityConfigs.SSL_ENABLED_PROTOCOLS_DOC) + .define(SecurityConfigs.SSL_KEYSTORE_TYPE_CONFIG, Type.STRING, SecurityConfigs.DEFAULT_SSL_KEYSTORE_TYPE, Importance.MEDIUM, SecurityConfigs.SSL_KEYSTORE_TYPE_DOC) + .define(SecurityConfigs.SSL_KEYSTORE_LOCATION_CONFIG, Type.STRING, Importance.HIGH, SecurityConfigs.SSL_KEYSTORE_LOCATION_DOC, false) + .define(SecurityConfigs.SSL_KEYSTORE_PASSWORD_CONFIG, Type.STRING, Importance.HIGH, SecurityConfigs.SSL_KEYSTORE_PASSWORD_DOC, false) + .define(SecurityConfigs.SSL_KEY_PASSWORD_CONFIG, Type.STRING, Importance.HIGH, SecurityConfigs.SSL_KEY_PASSWORD_DOC, false) + .define(SecurityConfigs.SSL_TRUSTSTORE_TYPE_CONFIG, Type.STRING, SecurityConfigs.DEFAULT_SSL_TRUSTSTORE_TYPE, Importance.MEDIUM, SecurityConfigs.SSL_TRUSTSTORE_TYPE_DOC) + .define(SecurityConfigs.SSL_TRUSTSTORE_LOCATION_CONFIG, Type.STRING, SecurityConfigs.DEFAULT_TRUSTSTORE_LOCATION, Importance.HIGH, SecurityConfigs.SSL_TRUSTSTORE_LOCATION_DOC) + .define(SecurityConfigs.SSL_TRUSTSTORE_PASSWORD_CONFIG, Type.STRING, SecurityConfigs.DEFAULT_TRUSTSTORE_PASSWORD, Importance.HIGH, SecurityConfigs.SSL_TRUSTSTORE_PASSWORD_DOC) + .define(SecurityConfigs.SSL_KEYMANAGER_ALGORITHM_CONFIG, Type.STRING, SecurityConfigs.DEFAULT_SSL_KEYMANGER_ALGORITHM, Importance.LOW, SecurityConfigs.SSL_KEYMANAGER_ALGORITHM_DOC) + .define(SecurityConfigs.SSL_TRUSTMANAGER_ALGORITHM_CONFIG, Type.STRING, SecurityConfigs.DEFAULT_SSL_TRUSTMANAGER_ALGORITHM, Importance.LOW, SecurityConfigs.SSL_TRUSTMANAGER_ALGORITHM_DOC) + .define(SecurityConfigs.SSL_ENDPOINT_IDENTIFICATION_ALGORITHM_CONFIG, Type.STRING, Importance.LOW, SecurityConfigs.SSL_ENDPOINT_IDENTIFICATION_ALGORITHM_DOC, false); + } public static Map addDeserializerToConfig(Map configs, diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/KafkaConsumer.java b/clients/src/main/java/org/apache/kafka/clients/consumer/KafkaConsumer.java index 375669f..55902ff 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/KafkaConsumer.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/KafkaConsumer.java @@ -44,6 +44,7 @@ import org.apache.kafka.common.metrics.Metrics; import org.apache.kafka.common.metrics.MetricsReporter; import org.apache.kafka.common.serialization.Deserializer; import org.apache.kafka.common.network.Selector; +import org.apache.kafka.common.network.ChannelBuilder; import org.apache.kafka.common.utils.SystemTime; import org.apache.kafka.common.utils.Time; import org.apache.kafka.common.utils.Utils; @@ -471,8 +472,8 @@ public class KafkaConsumer implements Consumer { String metricGrpPrefix = "consumer"; Map metricsTags = new LinkedHashMap(); metricsTags.put("client-id", clientId); - - this.client = new NetworkClient(new Selector(metrics, time, metricGrpPrefix, metricsTags, config.values()), + ChannelBuilder channelBuilder = ClientUtils.createChannelBuilder(config.values()); + this.client = new NetworkClient(new Selector(metrics, time, metricGrpPrefix, metricsTags, channelBuilder), this.metadata, clientId, 100, // a fixed large enough value will suffice diff --git 
a/clients/src/main/java/org/apache/kafka/clients/producer/KafkaProducer.java b/clients/src/main/java/org/apache/kafka/clients/producer/KafkaProducer.java index 1650d85..85a317c 100644 --- a/clients/src/main/java/org/apache/kafka/clients/producer/KafkaProducer.java +++ b/clients/src/main/java/org/apache/kafka/clients/producer/KafkaProducer.java @@ -44,6 +44,7 @@ import org.apache.kafka.common.metrics.Metrics; import org.apache.kafka.common.metrics.MetricsReporter; import org.apache.kafka.common.metrics.Sensor; import org.apache.kafka.common.network.Selector; +import org.apache.kafka.common.network.ChannelBuilder; import org.apache.kafka.common.record.CompressionType; import org.apache.kafka.common.record.Record; import org.apache.kafka.common.record.Records; @@ -227,8 +228,8 @@ public class KafkaProducer implements Producer { metricTags); List addresses = ClientUtils.parseAndValidateAddresses(config.getList(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG)); this.metadata.update(Cluster.bootstrap(addresses), time.milliseconds()); - - NetworkClient client = new NetworkClient(new Selector(this.metrics, time, "producer", metricTags, config.values()), + ChannelBuilder channelBuilder = ClientUtils.createChannelBuilder(config.values()); + NetworkClient client = new NetworkClient(new Selector(this.metrics, time, "producer", metricTags, channelBuilder), this.metadata, clientId, config.getInt(ProducerConfig.MAX_IN_FLIGHT_REQUESTS_PER_CONNECTION), @@ -550,7 +551,7 @@ public class KafkaProducer implements Producer { public void close(long timeout, TimeUnit timeUnit) { close(timeout, timeUnit, false); } - + private void close(long timeout, TimeUnit timeUnit, boolean swallowException) { if (timeout < 0) throw new IllegalArgumentException("The timeout cannot be negative."); diff --git a/clients/src/main/java/org/apache/kafka/clients/producer/ProducerConfig.java b/clients/src/main/java/org/apache/kafka/clients/producer/ProducerConfig.java index 552cafb..baa3d41 100644 --- a/clients/src/main/java/org/apache/kafka/clients/producer/ProducerConfig.java +++ b/clients/src/main/java/org/apache/kafka/clients/producer/ProducerConfig.java @@ -22,11 +22,11 @@ import java.util.Properties; import org.apache.kafka.clients.CommonClientConfigs; import org.apache.kafka.common.config.AbstractConfig; +import org.apache.kafka.common.config.SecurityConfigs; import org.apache.kafka.common.config.ConfigDef; import org.apache.kafka.common.config.ConfigDef.Importance; import org.apache.kafka.common.config.ConfigDef.Type; import org.apache.kafka.common.serialization.Serializer; -import org.apache.kafka.common.protocol.SecurityProtocol; /** * Configuration for the Kafka Producer. Documentation for these configurations can be found in the
    addSerializerToConfig(Map configs, diff --git a/clients/src/main/java/org/apache/kafka/common/config/SecurityConfigs.java b/clients/src/main/java/org/apache/kafka/common/config/SecurityConfigs.java new file mode 100644 index 0000000..1855399 --- /dev/null +++ b/clients/src/main/java/org/apache/kafka/common/config/SecurityConfigs.java @@ -0,0 +1,106 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE + * file distributed with this work for additional information regarding copyright ownership. The ASF licenses this file + * to You under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the + * License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on + * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + */ + +package org.apache.kafka.common.config; + +import javax.net.ssl.TrustManagerFactory; +import javax.net.ssl.KeyManagerFactory; + +public class SecurityConfigs { + /* + * NOTE: DO NOT CHANGE EITHER CONFIG NAMES AS THESE ARE PART OF THE PUBLIC API AND CHANGE WILL BREAK USER CODE. + */ + + public static final String SECURITY_PROTOCOL_CONFIG = "security.protocol"; + public static final String SECURITY_PROTOCOL_DOC = "Protocol used to communicate with brokers. Currently only PLAINTEXT and SSL are supported."; + public static final String DEFAULT_SECURITY_PROTOCOL = "PLAINTEXT"; + + public static final String PRINCIPAL_BUILDER_CLASS_CONFIG = "principal.builder.class"; + public static final String PRINCIPAL_BUILDER_CLASS_DOC = "principal builder to generate a java Principal. This config is optional for client."; + public static final String DEFAULT_PRINCIPAL_BUILDER_CLASS = "org.apache.kafka.common.security.auth.DefaultPrincipalBuilder"; + + public static final String SSL_PROTOCOL_CONFIG = "ssl.protocol"; + public static final String SSL_PROTOCOL_DOC = "The ssl protocol used to generate SSLContext." + + "Default setting is TLS. Allowed values are SSL, SSLv2, SSLv3, TLS, TLSv1.1, TLSv1.2"; + public static final String DEFAULT_SSL_PROTOCOL = "TLS"; + + public static final String SSL_PROVIDER_CONFIG = "ssl.provider"; + public static final String SSL_PROVIDER_DOC = "The name of the security provider used for SSL connections. Default value is the default security provider of the JVM."; + + public static final String SSL_CIPHER_SUITES_CONFIG = "ssl.cipher.suites"; + public static final String SSL_CIPHER_SUITES_DOC = "A cipher suite is a named combination of authentication, encryption, MAC and key exchange algorithm used to negotiate the security settings for a network connection using TLS or SSL network protocol." + + "By default all the available cipher suites are supported."; + + public static final String SSL_ENABLED_PROTOCOLS_CONFIG = "ssl.enabled.protocols"; + public static final String SSL_ENABLED_PROTOCOLS_DOC = "The list of protocols enabled for SSL connections. 
" + + "All versions of TLS is enabled by default."; + public static final String DEFAULT_ENABLED_PROTOCOLS = "TLSv1.2,TLSv1.1,TLSv1"; + + public static final String SSL_KEYSTORE_TYPE_CONFIG = "ssl.keystore.type"; + public static final String SSL_KEYSTORE_TYPE_DOC = "The file format of the key store file. " + + "This is optional for client. Default value is JKS"; + public static final String DEFAULT_SSL_KEYSTORE_TYPE = "JKS"; + + public static final String SSL_KEYSTORE_LOCATION_CONFIG = "ssl.keystore.location"; + public static final String SSL_KEYSTORE_LOCATION_DOC = "The location of the key store file. " + + "This is optional for Client and can be used for two-way authentication for client."; + + public static final String SSL_KEYSTORE_PASSWORD_CONFIG = "ssl.keystore.password"; + public static final String SSL_KEYSTORE_PASSWORD_DOC = "The store password for the key store file." + + "This is optional for client and only needed if the ssl.keystore.location configured. "; + + public static final String SSL_KEY_PASSWORD_CONFIG = "ssl.key.password"; + public static final String SSL_KEY_PASSWORD_DOC = "The password of the private key in the key store file. " + + "This is optional for client."; + + public static final String SSL_TRUSTSTORE_TYPE_CONFIG = "ssl.truststore.type"; + public static final String SSL_TRUSTSTORE_TYPE_DOC = "The file format of the trust store file. " + + "Default value is JKS."; + public static final String DEFAULT_SSL_TRUSTSTORE_TYPE = "JKS"; + + public static final String SSL_TRUSTSTORE_LOCATION_CONFIG = "ssl.truststore.location"; + public static final String SSL_TRUSTSTORE_LOCATION_DOC = "The location of the trust store file. "; + public static final String DEFAULT_TRUSTSTORE_LOCATION = "/tmp/ssl.truststore.jks"; + + public static final String SSL_TRUSTSTORE_PASSWORD_CONFIG = "ssl.truststore.password"; + public static final String SSL_TRUSTSTORE_PASSWORD_DOC = "The password for the trust store file. "; + public static final String DEFAULT_TRUSTSTORE_PASSWORD = "truststore_password"; + + public static final String SSL_CLIENT_REQUIRE_CERT_CONFIG = "ssl.client.require.cert"; + public static final String SSL_CLIENT_REQUIRE_CERT_DOC = "This is to enforce two-way authentication between client and server." + + "Default value is false. If set to true client needs to provide Keystore related config"; + public static final Boolean DEFAULT_SSL_CLIENT_REQUIRE_CERT = false; + + public static final String SSL_KEYMANAGER_ALGORITHM_CONFIG = "ssl.keymanager.algorithm"; + public static final String SSL_KEYMANAGER_ALGORITHM_DOC = "The algorithm used by key manager factory for SSL connections. " + + "Default value is the key manager factory algorithm configured for the Java Virtual Machine."; + public static final String DEFAULT_SSL_KEYMANGER_ALGORITHM = KeyManagerFactory.getDefaultAlgorithm(); + + public static final String SSL_TRUSTMANAGER_ALGORITHM_CONFIG = "ssl.trustmanager.algorithm"; + public static final String SSL_TRUSTMANAGER_ALGORITHM_DOC = "The algorithm used by trust manager factory for SSL connections. 
" + + "Default value is the trust manager factory algorithm configured for the Java Virtual Machine."; + public static final String DEFAULT_SSL_TRUSTMANAGER_ALGORITHM = TrustManagerFactory.getDefaultAlgorithm(); + + public static final String SSL_ENDPOINT_IDENTIFICATION_ALGORITHM_CONFIG = "ssl.endpoint.identification.algorithm"; + public static final String SSL_ENDPOINT_IDENTIFICATION_ALGORITHM_DOC = "The endpoint identification algorithm to validate server hostname using server certificate. "; + + public static final String SSL_NEED_CLIENT_AUTH_CONFIG = "ssl.need.client.auth"; + public static final String SSL_NEED_CLIENT_AUTH_CONFIG_DOC = "If set to true kafka broker requires all the ssl client connecting to provide client authentication. " + + "Default value is false"; + public static final Boolean DEFAULT_SSL_NEED_CLIENT_AUTH = false; + + public static final String SSL_WANT_CLIENT_AUTH_CONFIG = "ssl.want.client.auth"; + public static final String SSL_WANT_CLIENT_AUTH_CONFIG_DOC = "If set to true kafka broker requests for client authentication. Clients without any certificates can still be able to connect using SSL."; + public static final Boolean DEFAULT_SSL_WANT_CLIENT_AUTH = false; + +} diff --git a/clients/src/main/java/org/apache/kafka/common/network/Authenticator.java b/clients/src/main/java/org/apache/kafka/common/network/Authenticator.java index 920ed88..8ab004f 100644 --- a/clients/src/main/java/org/apache/kafka/common/network/Authenticator.java +++ b/clients/src/main/java/org/apache/kafka/common/network/Authenticator.java @@ -17,7 +17,7 @@ package org.apache.kafka.common.network; -/* +/** * Authentication for Channel */ @@ -41,10 +41,10 @@ public interface Authenticator { Principal principal() throws KafkaException; /** - * Does authentication and returns SelectionKey.OP if further communication needed - * If no further authentication needs to be done return 0. + * Implements any authentication mechanism. Use transportLayer to read or write tokens. + * If no further authentication needs to be done returns. 
*/ - int authenticate(boolean read, boolean write) throws IOException; + void authenticate() throws IOException; /** * returns true if authentication is complete otherwise returns false; diff --git a/clients/src/main/java/org/apache/kafka/common/network/Channel.java b/clients/src/main/java/org/apache/kafka/common/network/Channel.java index 7b2489e..f7dda3e 100644 --- a/clients/src/main/java/org/apache/kafka/common/network/Channel.java +++ b/clients/src/main/java/org/apache/kafka/common/network/Channel.java @@ -26,15 +26,13 @@ import java.nio.ByteBuffer; import java.nio.channels.ScatteringByteChannel; import java.nio.channels.GatheringByteChannel; import java.nio.channels.SocketChannel; +import java.nio.channels.SelectionKey; import java.security.Principal; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -/** - * - */ public class Channel implements ScatteringByteChannel, GatheringByteChannel { private static final Logger log = LoggerFactory.getLogger(Channel.class); @@ -54,33 +52,39 @@ public class Channel implements ScatteringByteChannel, GatheringByteChannel { /** * returns user principal for the session - * Incase of PLAINTEXT and No Authentication returns ANONYMOUS as the userPrincipal + * In case of PLAINTEXT and No Authentication returns ANONYMOUS as the userPrincipal + * If SSL used than * If SSL used without any SASL Authentication returns SSLSession.peerPrincipal */ public Principal principal() throws IOException { return authenticator.principal(); } - public int connect(boolean read, boolean write) throws IOException { + public void connect() throws IOException { if (transportLayer.isReady() && authenticator.isComplete()) - return 0; - int status = 0; + return; if (!transportLayer.isReady()) - status = transportLayer.handshake(read, write); - if (status == 0 && !authenticator.isComplete()) - status = authenticator.authenticate(read, write); - return status; + transportLayer.handshake(); + if (transportLayer.isReady() && !authenticator.isComplete()) + authenticator.authenticate(); } + public void disconnect() { + transportLayer.disconnect(); + } public boolean isOpen() { - return transportLayer.isOpen(); + return transportLayer.socketChannel().isOpen(); } public SocketChannel socketChannel() { return transportLayer.socketChannel(); } + public TransportLayer transportLayer() { + return transportLayer; + } + /** * Writes a sequence of bytes to this channel from the given buffer. 
*/ @@ -114,8 +118,24 @@ public class Channel implements ScatteringByteChannel, GatheringByteChannel { return transportLayer.read(dsts, offset, length); } - public boolean finishConnect() throws IOException { - return transportLayer.finishConnect(); + public void finishConnect() throws IOException { + transportLayer.finishConnect(); + } + + public void addInterestOps(int ops) { + transportLayer.addInterestOps(ops); + } + + public void removeInterestOps(int ops) { + transportLayer.removeInterestOps(ops); + } + + public void mute() { + transportLayer.removeInterestOps(SelectionKey.OP_READ); + } + + public void unmute() { + transportLayer.addInterestOps(SelectionKey.OP_READ); } public boolean isReady() { diff --git a/clients/src/main/java/org/apache/kafka/common/network/ChannelBuilder.java b/clients/src/main/java/org/apache/kafka/common/network/ChannelBuilder.java new file mode 100644 index 0000000..5dd1aef --- /dev/null +++ b/clients/src/main/java/org/apache/kafka/common/network/ChannelBuilder.java @@ -0,0 +1,43 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE + * file distributed with this work for additional information regarding copyright ownership. The ASF licenses this file + * to You under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the + * License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on + * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + */ +package org.apache.kafka.common.network; + +import java.util.Map; +import java.nio.channels.SelectionKey; + +import org.apache.kafka.common.KafkaException; + +/** + * A ChannelBuilder interface to build Channel based on configs + */ +public interface ChannelBuilder { + + /** + * Configure this class with the given key-value pairs + */ + public void configure(Map configs) throws KafkaException; + + + /** + * returns a Channel with TransportLayer and Authenticator configured. 
+ * @param socketChannel + */ + public Channel buildChannel(SelectionKey key) throws KafkaException; + + + /** + * Closes ChannelBuilder + */ + public void close(); + +} diff --git a/clients/src/main/java/org/apache/kafka/common/network/DefaultAuthenticator.java b/clients/src/main/java/org/apache/kafka/common/network/DefaultAuthenticator.java index 3a66e7b..d5e24ad 100644 --- a/clients/src/main/java/org/apache/kafka/common/network/DefaultAuthenticator.java +++ b/clients/src/main/java/org/apache/kafka/common/network/DefaultAuthenticator.java @@ -25,31 +25,39 @@ import org.apache.kafka.common.KafkaException; public class DefaultAuthenticator implements Authenticator { - TransportLayer transportLayer; - PrincipalBuilder principalBuilder; - Principal principal; + private TransportLayer transportLayer; + private PrincipalBuilder principalBuilder; + private Principal principal; public DefaultAuthenticator(TransportLayer transportLayer, PrincipalBuilder principalBuilder) { this.transportLayer = transportLayer; this.principalBuilder = principalBuilder; } - /* + /** * No-Op for default authenticator */ - public int authenticate(boolean read, boolean write) throws IOException { - return 0; - } + public void authenticate() throws IOException {} + /** + * Constructs Principal using configured principalBuilder. + * @return Principal + * @throws KafkaException + */ public Principal principal() throws KafkaException { - if (principal != null) + if (principal == null) principal = principalBuilder.buildPrincipal(transportLayer, this); return principal; } public void close() throws IOException {} + /** + * DefaultAuthenticator doesn't implement any additional authentication. + * @returns true + */ public boolean isComplete() { return true; } + } diff --git a/clients/src/main/java/org/apache/kafka/common/network/PlainTextChannelBuilder.java b/clients/src/main/java/org/apache/kafka/common/network/PlainTextChannelBuilder.java new file mode 100644 index 0000000..51adce5 --- /dev/null +++ b/clients/src/main/java/org/apache/kafka/common/network/PlainTextChannelBuilder.java @@ -0,0 +1,57 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE + * file distributed with this work for additional information regarding copyright ownership. The ASF licenses this file + * to You under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the + * License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on + * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. 
+ */ +package org.apache.kafka.common.network; + +import java.nio.channels.SelectionKey; +import java.util.Map; + +import org.apache.kafka.common.security.auth.PrincipalBuilder; +import org.apache.kafka.common.config.SecurityConfigs; +import org.apache.kafka.common.utils.Utils; +import org.apache.kafka.common.KafkaException; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + + +public class PlainTextChannelBuilder implements ChannelBuilder { + private static final Logger log = LoggerFactory.getLogger(PlainTextChannelBuilder.class); + private PrincipalBuilder principalBuilder; + + public void configure(Map configs) throws KafkaException { + try { + this.principalBuilder = (PrincipalBuilder) Utils.newInstance((Class) configs.get(SecurityConfigs.PRINCIPAL_BUILDER_CLASS_CONFIG)); + this.principalBuilder.configure(configs); + } catch (Exception e) { + throw new KafkaException(e); + } + } + + public Channel buildChannel(SelectionKey key) throws KafkaException { + Channel channel = null; + try { + PlainTextTransportLayer transportLayer = new PlainTextTransportLayer(key); + Authenticator authenticator = new DefaultAuthenticator(transportLayer, this.principalBuilder); + channel = new Channel(transportLayer, authenticator); + } catch (Exception e) { + log.warn("Failed to create channel due to ", e); + throw new KafkaException(e); + } + return channel; + } + + public void close() { + this.principalBuilder.close(); + } + +} diff --git a/clients/src/main/java/org/apache/kafka/common/network/PlainTextTransportLayer.java b/clients/src/main/java/org/apache/kafka/common/network/PlainTextTransportLayer.java index 17b014f..eb4504b 100644 --- a/clients/src/main/java/org/apache/kafka/common/network/PlainTextTransportLayer.java +++ b/clients/src/main/java/org/apache/kafka/common/network/PlainTextTransportLayer.java @@ -26,8 +26,8 @@ import java.io.DataInputStream; import java.io.DataOutputStream; import java.nio.ByteBuffer; import java.nio.channels.SocketChannel; +import java.nio.channels.SelectionKey; -import javax.net.ssl.SSLSession; import java.security.Principal; import org.apache.kafka.common.security.auth.KafkaPrincipal; @@ -37,15 +37,17 @@ import org.slf4j.LoggerFactory; public class PlainTextTransportLayer implements TransportLayer { private static final Logger log = LoggerFactory.getLogger(PlainTextTransportLayer.class); - SocketChannel socketChannel = null; - DataInputStream inStream = null; - DataOutputStream outStream = null; + private SelectionKey key; + private SocketChannel socketChannel; + private DataInputStream inStream; + private DataOutputStream outStream; + private final Principal principal = new KafkaPrincipal("ANONYMOUS"); - public PlainTextTransportLayer(SocketChannel socketChannel) throws IOException { - this.socketChannel = socketChannel; + public PlainTextTransportLayer(SelectionKey key) throws IOException { + this.key = key; + this.socketChannel = (SocketChannel) key.channel(); } - /** * Closes this channel * @@ -56,26 +58,8 @@ public class PlainTextTransportLayer implements TransportLayer { socketChannel.close(); } - /** - * Flushes the buffer to the network, non blocking - * @param buf ByteBuffer - * @return boolean true if the buffer has been emptied out, false otherwise - * @throws IOException - */ - public boolean flush(ByteBuffer buf) throws IOException { - int remaining = buf.remaining(); - if (remaining > 0) { - int written = socketChannel.write(buf); - return written >= remaining; - } - return true; - } - - /** - * Tells wheter or not this channel is open. 
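
As a usage note for the two ChannelBuilder implementations, the intended flow is configure once, then build one Channel per registered key; the sketch below assumes an open java.nio.channels.Selector named selector and a configs map that contains principal.builder.class (plus the ssl.* keys when SSLChannelBuilder is used).

    ChannelBuilder builder = new PlainTextChannelBuilder();   // or new SSLChannelBuilder()
    builder.configure(configs);                               // reads principal.builder.class and, for SSL, the ssl.* settings
    SocketChannel socketChannel = SocketChannel.open();
    socketChannel.configureBlocking(false);
    SelectionKey key = socketChannel.register(selector, SelectionKey.OP_CONNECT);
    Channel channel = builder.buildChannel(key);              // TransportLayer + Authenticator wired together
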
- */ - public boolean isOpen() { - return socketChannel.isOpen(); + public void disconnect() { + key.cancel(); } /** @@ -113,21 +97,20 @@ public class PlainTextTransportLayer implements TransportLayer { return socketChannel; } - public boolean finishConnect() throws IOException { - return socketChannel.finishConnect(); + public void finishConnect() throws IOException { + socketChannel.finishConnect(); + int ops = key.interestOps(); + ops &= ~SelectionKey.OP_CONNECT; + ops |= SelectionKey.OP_READ; + key.interestOps(ops); } /** * Performs SSL handshake hence is a no-op for the non-secure * implementation - * @param read Unused in non-secure implementation - * @param write Unused in non-secure implementation - * @return Always return 0 * @throws IOException */ - public int handshake(boolean read, boolean write) throws IOException { - return 0; - } + public void handshake() throws IOException {} public DataInputStream inStream() throws IOException { if (inStream == null) @@ -142,11 +125,15 @@ public class PlainTextTransportLayer implements TransportLayer { } public Principal peerPrincipal() throws IOException { - return new KafkaPrincipal("ANONYMOUS"); + return principal; + } + + public void addInterestOps(int ops) { + key.interestOps(key.interestOps() | ops); } - public SSLSession sslSession() throws IllegalStateException, UnsupportedOperationException { - throw new UnsupportedOperationException("sslSession not supported for PlainTextTransportLayer"); + public void removeInterestOps(int ops) { + key.interestOps(key.interestOps() & ~ops); } } diff --git a/clients/src/main/java/org/apache/kafka/common/network/SSLChannelBuilder.java b/clients/src/main/java/org/apache/kafka/common/network/SSLChannelBuilder.java new file mode 100644 index 0000000..22fec8b --- /dev/null +++ b/clients/src/main/java/org/apache/kafka/common/network/SSLChannelBuilder.java @@ -0,0 +1,67 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE + * file distributed with this work for additional information regarding copyright ownership. The ASF licenses this file + * to You under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the + * License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on + * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. 
+ */ +package org.apache.kafka.common.network; + +import java.nio.channels.SelectionKey; +import java.nio.channels.SocketChannel; +import java.util.Map; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; + +import org.apache.kafka.common.security.auth.PrincipalBuilder; +import org.apache.kafka.common.config.SecurityConfigs; +import org.apache.kafka.common.utils.Utils; +import org.apache.kafka.common.KafkaException; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +public class SSLChannelBuilder implements ChannelBuilder { + private static final Logger log = LoggerFactory.getLogger(SSLChannelBuilder.class); + private SSLFactory sslFactory; + private ExecutorService executorService; + private PrincipalBuilder principalBuilder; + + public void configure(Map configs) throws KafkaException { + try { + this.executorService = Executors.newScheduledThreadPool(1); + this.sslFactory = new SSLFactory(SSLFactory.Mode.CLIENT); + this.sslFactory.configure(configs); + this.principalBuilder = (PrincipalBuilder) Utils.newInstance((Class) configs.get(SecurityConfigs.PRINCIPAL_BUILDER_CLASS_CONFIG)); + this.principalBuilder.configure(configs); + } catch (Exception e) { + throw new KafkaException(e); + } + } + + public Channel buildChannel(SelectionKey key) throws KafkaException { + Channel channel = null; + try { + SocketChannel socketChannel = (SocketChannel) key.channel(); + SSLTransportLayer transportLayer = new SSLTransportLayer(key, + sslFactory.createSSLEngine(socketChannel.socket().getInetAddress().getHostName(), + socketChannel.socket().getPort()), + executorService); + Authenticator authenticator = new DefaultAuthenticator(transportLayer, this.principalBuilder); + channel = new Channel(transportLayer, authenticator); + } catch (Exception e) { + log.info("Failed to create channel due to ", e); + throw new KafkaException(e); + } + return channel; + } + + public void close() { + this.executorService.shutdown(); + this.principalBuilder.close(); + } +} diff --git a/clients/src/main/java/org/apache/kafka/common/network/SSLFactory.java b/clients/src/main/java/org/apache/kafka/common/network/SSLFactory.java index 7fbb9d7..557c5f9 100644 --- a/clients/src/main/java/org/apache/kafka/common/network/SSLFactory.java +++ b/clients/src/main/java/org/apache/kafka/common/network/SSLFactory.java @@ -27,7 +27,7 @@ import javax.net.ssl.*; import org.apache.kafka.common.KafkaException; import org.apache.kafka.common.Configurable; -import org.apache.kafka.clients.CommonClientConfigs; +import org.apache.kafka.common.config.SecurityConfigs; public class SSLFactory implements Configurable { @@ -42,8 +42,10 @@ public class SSLFactory implements Configurable { private SecurityStore truststore; private String[] cipherSuites; private String[] enabledProtocols; + private String endpointIdentification; private SSLContext sslContext; - private boolean requireClientCert; + private boolean needClientAuth; + private boolean wantClientAuth; private Mode mode; @@ -53,30 +55,44 @@ public class SSLFactory implements Configurable { @Override public void configure(Map configs) throws KafkaException { - this.protocol = (String) configs.get(CommonClientConfigs.SSL_PROTOCOL_CONFIG); + this.protocol = (String) configs.get(SecurityConfigs.SSL_PROTOCOL_CONFIG); + this.provider = (String) configs.get(SecurityConfigs.SSL_PROVIDER_CONFIG); - if (configs.get(CommonClientConfigs.SSL_CIPHER_SUITES_CONFIG) != null) { - List cipherSuitesList = (List) 
configs.get(CommonClientConfigs.SSL_CIPHER_SUITES_CONFIG); + if (configs.get(SecurityConfigs.SSL_CIPHER_SUITES_CONFIG) != null) { + List cipherSuitesList = (List) configs.get(SecurityConfigs.SSL_CIPHER_SUITES_CONFIG); this.cipherSuites = (String[]) cipherSuitesList.toArray(new String[cipherSuitesList.size()]); } - if (configs.get(CommonClientConfigs.SSL_ENABLED_PROTOCOLS_CONFIG) != null) { - List enabledProtocolsList = (List) configs.get(CommonClientConfigs.SSL_ENABLED_PROTOCOLS_CONFIG); + if (configs.get(SecurityConfigs.SSL_ENABLED_PROTOCOLS_CONFIG) != null) { + List enabledProtocolsList = (List) configs.get(SecurityConfigs.SSL_ENABLED_PROTOCOLS_CONFIG); this.enabledProtocols = (String[]) enabledProtocolsList.toArray(new String[enabledProtocolsList.size()]); } - this.requireClientCert = (Boolean) configs.get(CommonClientConfigs.SSL_CLIENT_REQUIRE_CERT_CONFIG); - this.kmfAlgorithm = (String) configs.get(CommonClientConfigs.SSL_KEYMANAGER_ALGORITHM_CONFIG); - this.tmfAlgorithm = (String) configs.get(CommonClientConfigs.SSL_TRUSTMANAGER_ALGORITHM_CONFIG); - if ((mode == Mode.CLIENT && requireClientCert) || (mode == Mode.SERVER)) { - createKeystore((String) configs.get(CommonClientConfigs.SSL_KEYSTORE_TYPE_CONFIG), - (String) configs.get(CommonClientConfigs.SSL_KEYSTORE_LOCATION_CONFIG), - (String) configs.get(CommonClientConfigs.SSL_KEYSTORE_PASSWORD_CONFIG), - (String) configs.get(CommonClientConfigs.SSL_KEY_PASSWORD_CONFIG)); + if (configs.containsKey(SecurityConfigs.SSL_ENDPOINT_IDENTIFICATION_ALGORITHM_CONFIG)) { + this.endpointIdentification = (String) configs.get(SecurityConfigs.SSL_ENDPOINT_IDENTIFICATION_ALGORITHM_CONFIG); } - createTruststore((String) configs.get(CommonClientConfigs.SSL_TRUSTSTORE_TYPE_CONFIG), - (String) configs.get(CommonClientConfigs.SSL_TRUSTSTORE_LOCATION_CONFIG), - (String) configs.get(CommonClientConfigs.SSL_TRUSTSTORE_PASSWORD_CONFIG)); + + if (configs.containsKey(SecurityConfigs.SSL_NEED_CLIENT_AUTH_CONFIG)) { + this.needClientAuth = (Boolean) configs.get(SecurityConfigs.SSL_NEED_CLIENT_AUTH_CONFIG); + } + + if (configs.containsKey(SecurityConfigs.SSL_WANT_CLIENT_AUTH_CONFIG)) { + this.wantClientAuth = (Boolean) configs.get(SecurityConfigs.SSL_WANT_CLIENT_AUTH_CONFIG); + } + + this.kmfAlgorithm = (String) configs.get(SecurityConfigs.SSL_KEYMANAGER_ALGORITHM_CONFIG); + this.tmfAlgorithm = (String) configs.get(SecurityConfigs.SSL_TRUSTMANAGER_ALGORITHM_CONFIG); + + if (checkKeyStoreConfigs(configs)) { + createKeystore((String) configs.get(SecurityConfigs.SSL_KEYSTORE_TYPE_CONFIG), + (String) configs.get(SecurityConfigs.SSL_KEYSTORE_LOCATION_CONFIG), + (String) configs.get(SecurityConfigs.SSL_KEYSTORE_PASSWORD_CONFIG), + (String) configs.get(SecurityConfigs.SSL_KEY_PASSWORD_CONFIG)); + } + + createTruststore((String) configs.get(SecurityConfigs.SSL_TRUSTSTORE_TYPE_CONFIG), + (String) configs.get(SecurityConfigs.SSL_TRUSTSTORE_LOCATION_CONFIG), + (String) configs.get(SecurityConfigs.SSL_TRUSTSTORE_PASSWORD_CONFIG)); try { this.sslContext = createSSLContext(); } catch (Exception e) { @@ -114,42 +130,31 @@ public class SSLFactory implements Configurable { public SSLEngine createSSLEngine(String peerHost, int peerPort) { SSLEngine sslEngine = sslContext.createSSLEngine(peerHost, peerPort); if (cipherSuites != null) sslEngine.setEnabledCipherSuites(cipherSuites); + if (enabledProtocols != null) sslEngine.setEnabledProtocols(enabledProtocols); + if (mode == Mode.SERVER) { sslEngine.setUseClientMode(false); + if (needClientAuth) + 
sslEngine.setNeedClientAuth(needClientAuth); + else if (wantClientAuth) + sslEngine.setNeedClientAuth(wantClientAuth); } else { sslEngine.setUseClientMode(true); - sslEngine.setNeedClientAuth(requireClientCert); + SSLParameters sslParams = sslEngine.getSSLParameters(); + sslParams.setEndpointIdentificationAlgorithm(endpointIdentification); + sslEngine.setSSLParameters(sslParams); } - if (enabledProtocols != null) sslEngine.setEnabledProtocols(enabledProtocols); return sslEngine; } /** - * Returns a configured SSLServerSocketFactory. - * - * @return the configured SSLSocketFactory. - * @throws GeneralSecurityException thrown if the SSLSocketFactory could not - * be initialized. - * @throws IOException thrown if and IO error occurred while loading - * the server keystore. + * Returns a configured SSLContext. + * @return SSLContext. */ - public SSLServerSocketFactory createSSLServerSocketFactory() throws GeneralSecurityException, IOException { - if (mode != Mode.SERVER) { - throw new IllegalStateException("Factory is in CLIENT mode"); - } - return sslContext.getServerSocketFactory(); - } - - /** - * Returns if client certificates are required or not. - * - * @return if client certificates are required or not. - */ - public boolean isClientCertRequired() { - return requireClientCert; + public SSLContext sslContext() { + return sslContext; } - private void createKeystore(String type, String path, String password, String keyPassword) { if (path == null && password != null) { throw new KafkaException("SSL key store password is not specified."); @@ -171,6 +176,12 @@ public class SSLFactory implements Configurable { } } + private boolean checkKeyStoreConfigs(Map configs) { + return configs.containsKey(SecurityConfigs.SSL_KEYSTORE_TYPE_CONFIG) && + configs.containsKey(SecurityConfigs.SSL_KEYSTORE_LOCATION_CONFIG) && + configs.containsKey(SecurityConfigs.SSL_KEYSTORE_PASSWORD_CONFIG) && + configs.containsKey(SecurityConfigs.SSL_KEY_PASSWORD_CONFIG); + } private class SecurityStore { private final String type; diff --git a/clients/src/main/java/org/apache/kafka/common/network/SSLTransportLayer.java b/clients/src/main/java/org/apache/kafka/common/network/SSLTransportLayer.java index 0267e85..f25e537 100644 --- a/clients/src/main/java/org/apache/kafka/common/network/SSLTransportLayer.java +++ b/clients/src/main/java/org/apache/kafka/common/network/SSLTransportLayer.java @@ -18,9 +18,11 @@ package org.apache.kafka.common.network; import java.io.IOException; +import java.io.EOFException; import java.nio.ByteBuffer; import java.nio.channels.SocketChannel; import java.nio.channels.SelectionKey; +import java.nio.channels.CancelledKeyException; import java.security.Principal; import javax.net.ssl.SSLEngine; @@ -46,38 +48,37 @@ import org.slf4j.LoggerFactory; public class SSLTransportLayer implements TransportLayer { private static final Logger log = LoggerFactory.getLogger(SSLTransportLayer.class); protected SSLEngine sslEngine; - + private SelectionKey key; private SocketChannel socketChannel; private HandshakeStatus handshakeStatus; private SSLEngineResult handshakeResult; private boolean handshakeComplete = false; private boolean closed = false; private boolean closing = false; - private ByteBuffer netInBuffer; - private ByteBuffer netOutBuffer; + private ByteBuffer netReadBuffer; + private ByteBuffer netWriteBuffer; private ByteBuffer appReadBuffer; - private ByteBuffer appWriteBuffer; private ByteBuffer emptyBuf = ByteBuffer.allocate(0); private DataInputStream inStream; private DataOutputStream 
outStream; private ExecutorService executorService; + private int interestOps; - public SSLTransportLayer(SocketChannel socketChannel, SSLEngine sslEngine, ExecutorService executorService) throws IOException { - this.socketChannel = socketChannel; + public SSLTransportLayer(SelectionKey key, SSLEngine sslEngine, ExecutorService executorService) throws IOException { + this.key = key; + this.socketChannel = (SocketChannel) key.channel(); this.sslEngine = sslEngine; this.executorService = executorService; - this.netInBuffer = ByteBuffer.allocateDirect(sslEngine.getSession().getPacketBufferSize()); - this.netOutBuffer = ByteBuffer.allocateDirect(sslEngine.getSession().getPacketBufferSize()); - this.appWriteBuffer = ByteBuffer.allocateDirect(sslEngine.getSession().getApplicationBufferSize()); - this.appReadBuffer = ByteBuffer.allocateDirect(sslEngine.getSession().getApplicationBufferSize()); - startHandshake(); + this.netReadBuffer = ByteBuffer.allocateDirect(packetBufferSize()); + this.netWriteBuffer = ByteBuffer.allocateDirect(packetBufferSize()); + this.appReadBuffer = ByteBuffer.allocateDirect(applicationBufferSize()); } - public void startHandshake() throws IOException { - netOutBuffer.position(0); - netOutBuffer.limit(0); - netInBuffer.position(0); - netInBuffer.limit(0); + private void startHandshake() throws IOException { + netWriteBuffer.position(0); + netWriteBuffer.limit(0); + netReadBuffer.position(0); + netReadBuffer.limit(0); handshakeComplete = false; closed = false; closing = false; @@ -90,8 +91,16 @@ public class SSLTransportLayer implements TransportLayer { return socketChannel; } - public boolean finishConnect() throws IOException { - return socketChannel.finishConnect(); + public void finishConnect() throws IOException { + socketChannel.finishConnect(); + removeInterestOps(SelectionKey.OP_CONNECT); + addInterestOps(SelectionKey.OP_READ); + key.interestOps(key.interestOps() & ~SelectionKey.OP_CONNECT); + startHandshake(); + } + + public void disconnect() { + key.cancel(); } /** @@ -100,7 +109,7 @@ public class SSLTransportLayer implements TransportLayer { * @return boolean true if the buffer has been emptied out, false otherwise * @throws IOException */ - public boolean flush(ByteBuffer buf) throws IOException { + private boolean flush(ByteBuffer buf) throws IOException { int remaining = buf.remaining(); if (remaining > 0) { int written = socketChannel.write(buf); @@ -111,62 +120,74 @@ public class SSLTransportLayer implements TransportLayer { /** * Performs SSL handshake, non blocking. - * The return for this operation is 0 if the handshake is complete and a positive value if it is not complete. - * In the event of a positive value coming back, re-register the selection key for the return values interestOps. 
- * @param read boolean - true if the underlying channel is readable - * @param write boolean - true if the underlying channel is writable - * @return int - 0 if hand shake is complete, otherwise it returns a SelectionKey interestOps value * @throws IOException */ - public int handshake(boolean read, boolean write) throws IOException { - if (handshakeComplete) return 0; //we have done our initial handshake + public void handshake() throws IOException { + boolean read = key.isReadable(); + boolean write = key.isWritable(); + handshakeComplete = false; + handshakeStatus = sslEngine.getHandshakeStatus(); - if (!flush(netOutBuffer)) return SelectionKey.OP_WRITE; + if (!flush(netWriteBuffer)) { + key.interestOps(SelectionKey.OP_WRITE); + return; + } try { switch(handshakeStatus) { - case NOT_HANDSHAKING: - // SSLEnginge.getHandshakeStatus is transient and it doesn't record FINISHED status properly - if (handshakeResult.getHandshakeStatus() == HandshakeStatus.FINISHED) { - handshakeComplete = !netOutBuffer.hasRemaining(); - if (handshakeComplete) - return 0; - else - return SelectionKey.OP_WRITE; - } else { - throw new IOException("NOT_HANDSHAKING during handshake"); - } - case FINISHED: - //we are complete if we have delivered the last package - handshakeComplete = !netOutBuffer.hasRemaining(); - //return 0 if we are complete, otherwise we still have data to write - if (handshakeComplete) return 0; - else return SelectionKey.OP_WRITE; + case NEED_TASK: + handshakeStatus = tasks(); + break; case NEED_WRAP: handshakeResult = handshakeWrap(write); - if (handshakeResult.getStatus() == Status.OK) { - if (handshakeStatus == HandshakeStatus.NEED_TASK) - handshakeStatus = tasks(); - } else { - //wrap should always work with our buffers - throw new IOException("Unexpected status [" + handshakeResult.getStatus() + "] during handshake WRAP."); + if (handshakeResult.getStatus() == Status.BUFFER_OVERFLOW) { + int currentPacketBufferSize = packetBufferSize(); + netWriteBuffer = Utils.ensureCapacity(netWriteBuffer, currentPacketBufferSize); + if (netWriteBuffer.position() > currentPacketBufferSize) { + throw new IllegalStateException("Buffer overflow when available data (" + netWriteBuffer.position() + + ") > network buffer size (" + currentPacketBufferSize + ")"); + } + } else if (handshakeResult.getStatus() == Status.BUFFER_UNDERFLOW) { + throw new IllegalStateException("Should not have received BUFFER_UNDERFLOW during handshake WRAP."); + } else if (handshakeResult.getStatus() == Status.CLOSED) { + throw new EOFException(); } - if (handshakeStatus != HandshakeStatus.NEED_UNWRAP || (!flush(netOutBuffer))) - return SelectionKey.OP_WRITE; //fall down to NEED_UNWRAP on the same call, will result in a //BUFFER_UNDERFLOW if it needs data + if (handshakeStatus != HandshakeStatus.NEED_UNWRAP || !flush(netWriteBuffer)) { + key.interestOps(SelectionKey.OP_WRITE); + break; + } case NEED_UNWRAP: handshakeResult = handshakeUnwrap(read); - if (handshakeResult.getStatus() == Status.OK) { - if (handshakeStatus == HandshakeStatus.NEED_TASK) - handshakeStatus = tasks(); - } else if (handshakeResult.getStatus() == Status.BUFFER_UNDERFLOW) { - return SelectionKey.OP_READ; - } else { - throw new IOException(String.format("Unexpected status [%s] during handshake UNWRAP", handshakeStatus)); + if (handshakeResult.getStatus() == Status.BUFFER_UNDERFLOW) { + int currentPacketBufferSize = packetBufferSize(); + netReadBuffer = Utils.ensureCapacity(netReadBuffer, currentPacketBufferSize); + if (netReadBuffer.position() >= 
currentPacketBufferSize) { + throw new IllegalStateException("Buffer underflow when there is available data"); + } + if (!read) key.interestOps(SelectionKey.OP_READ); + } else if (handshakeResult.getStatus() == Status.BUFFER_OVERFLOW) { + int currentAppBufferSize = applicationBufferSize(); + netReadBuffer = Utils.ensureCapacity(netReadBuffer, currentAppBufferSize); + if (netReadBuffer.position() > currentAppBufferSize) { + throw new IllegalStateException("Buffer overflow when available data (" + netReadBuffer.position() + + ") > application buffer size (" + currentAppBufferSize + ")"); + } + + if (!read) key.interestOps(SelectionKey.OP_READ); + } else if (handshakeResult.getStatus() == Status.CLOSED) { + throw new EOFException("SSL handshake status CLOSED during handshake UNWRAP"); } + //if handshakeStatus completed then fall through to the finished status. + //after handshake is finished there is no data left to read/write in socketChannel. + //so the selector won't invoke this channel if we don't go through the handshakeFinished here. + if (handshakeStatus != HandshakeStatus.FINISHED) + break; + case FINISHED: + handshakeFinished(); break; - case NEED_TASK: - handshakeStatus = tasks(); + case NOT_HANDSHAKING: + handshakeFinished(); break; default: throw new IllegalStateException(String.format("Unexpected status [%s]", handshakeStatus)); @@ -175,27 +196,25 @@ handshakeFailure(); throw e; } - - //return 0 if we are complete, otherwise re-register for any activity that - //would cause this method to be called again. - if (handshakeComplete) return 0; - else return SelectionKey.OP_WRITE | SelectionKey.OP_READ; } + /** - * Executes all the tasks needed on the executorservice thread. + * Executes the SSLEngine tasks needed on the executorservice thread. * @return HandshakeStatus */ private HandshakeStatus tasks() { - for (;;) { - final Runnable task = sslEngine.getDelegatedTask(); - if (task == null) - break; + final Runnable task = delegatedTask(); + if (task != null) { + // un-register read/write ops while the delegated tasks are running. + key.interestOps(0); executorService.submit(new Runnable() { @Override public void run() { task.run(); + // register read/write ops to continue handshake. + key.interestOps(SelectionKey.OP_READ | SelectionKey.OP_WRITE); } }); } @@ -203,6 +222,27 @@ } /** + * Checks if the handshake status is finished + * Sets the interestOps for the selectionKey. + */ + private void handshakeFinished() throws IOException { + // SSLEngine.getHandshakeStatus is transient and it doesn't record FINISHED status properly. + // It can move from FINISHED status to NOT_HANDSHAKING after the handshake is completed. 
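
To make the non-blocking wrap/unwrap transitions above easier to follow, here is a deliberately simplified blocking rendition of the same SSLEngine state machine; it is an illustration only (the patch drives the identical transitions from the selector, resizes buffers on overflow/underflow and runs delegated tasks on an ExecutorService), and the class and method names below are invented for the example.

    import java.io.EOFException;
    import java.io.IOException;
    import java.nio.ByteBuffer;
    import java.nio.channels.SocketChannel;
    import javax.net.ssl.SSLEngine;
    import javax.net.ssl.SSLEngineResult;
    import javax.net.ssl.SSLEngineResult.HandshakeStatus;
    import javax.net.ssl.SSLSession;

    final class BlockingHandshakeSketch {
        // Assumes a connected, blocking SocketChannel and an SSLEngine whose client/server mode
        // has already been set (SSLFactory.createSSLEngine does that in this patch).
        static void handshake(SocketChannel channel, SSLEngine engine) throws IOException {
            SSLSession session = engine.getSession();
            ByteBuffer netOut = ByteBuffer.allocate(session.getPacketBufferSize());
            ByteBuffer netIn = ByteBuffer.allocate(session.getPacketBufferSize());
            ByteBuffer appIn = ByteBuffer.allocate(session.getApplicationBufferSize());
            ByteBuffer empty = ByteBuffer.allocate(0);
            engine.beginHandshake();
            HandshakeStatus status = engine.getHandshakeStatus();
            while (status != HandshakeStatus.FINISHED && status != HandshakeStatus.NOT_HANDSHAKING) {
                switch (status) {
                    case NEED_WRAP: {
                        netOut.clear();
                        status = engine.wrap(empty, netOut).getHandshakeStatus();
                        netOut.flip();
                        while (netOut.hasRemaining())
                            channel.write(netOut);                  // blocking write of the handshake record
                        break;
                    }
                    case NEED_UNWRAP: {
                        netIn.flip();
                        SSLEngineResult result = engine.unwrap(netIn, appIn);
                        netIn.compact();                            // keep any partial record for the next pass
                        status = result.getHandshakeStatus();
                        if (result.getStatus() == SSLEngineResult.Status.BUFFER_UNDERFLOW
                                && channel.read(netIn) < 0)         // need more bytes from the wire
                            throw new EOFException("EOF during handshake");
                        break;
                    }
                    case NEED_TASK: {
                        Runnable task;
                        while ((task = engine.getDelegatedTask()) != null)
                            task.run();                             // the patch submits these to an ExecutorService
                        status = engine.getHandshakeStatus();
                        break;
                    }
                    default:
                        throw new IllegalStateException("Unexpected handshake status " + status);
                }
            }
        }
    }
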
+ // Hence we also need to check handshakeResult.getHandshakeStatus() if the handshake finished or not + if (handshakeResult.getHandshakeStatus() == HandshakeStatus.FINISHED) { + //we are complete if we have delivered the last package + handshakeComplete = !netWriteBuffer.hasRemaining(); + //set interestOps if we are complete, otherwise we still have data to write + if (handshakeComplete) + key.interestOps(interestOps); + else + key.interestOps(SelectionKey.OP_WRITE); + } else { + throw new IOException("NOT_HANDSHAKING during handshake"); + } + } + + /** * Performs the WRAP function * @param doWrite boolean * @return SSLEngineResult @@ -211,13 +251,13 @@ public class SSLTransportLayer implements TransportLayer { private SSLEngineResult handshakeWrap(Boolean doWrite) throws IOException { //this should never be called with a network buffer that contains data //so we can clear it here. - netOutBuffer.clear(); - SSLEngineResult result = sslEngine.wrap(appWriteBuffer, netOutBuffer); + netWriteBuffer.clear(); + SSLEngineResult result = sslEngine.wrap(emptyBuf, netWriteBuffer); //prepare the results to be written - netOutBuffer.flip(); + netWriteBuffer.flip(); handshakeStatus = result.getHandshakeStatus(); //optimization, if we do have a writable channel, write it now - if (doWrite) flush(netOutBuffer); + if (doWrite) flush(netWriteBuffer); return result; } @@ -228,23 +268,22 @@ public class SSLTransportLayer implements TransportLayer { * @throws IOException */ private SSLEngineResult handshakeUnwrap(Boolean doRead) throws IOException { - if (netInBuffer.position() == netInBuffer.limit()) { + if (netReadBuffer.position() == netReadBuffer.limit()) { //clear the buffer if we have emptied it out on data - netInBuffer.clear(); + netReadBuffer.clear(); } if (doRead) { - int read = socketChannel.read(netInBuffer); + int read = socketChannel.read(netReadBuffer); if (read == -1) throw new IOException("EOF during handshake."); } - SSLEngineResult result; boolean cont = false; do { //prepare the buffer with the incoming data - netInBuffer.flip(); - result = sslEngine.unwrap(netInBuffer, appWriteBuffer); - netInBuffer.compact(); + netReadBuffer.flip(); + result = sslEngine.unwrap(netReadBuffer, appReadBuffer); + netReadBuffer.compact(); handshakeStatus = result.getHandshakeStatus(); if (result.getStatus() == SSLEngineResult.Status.OK && result.getHandshakeStatus() == HandshakeStatus.NEED_TASK) { @@ -267,22 +306,22 @@ public class SSLTransportLayer implements TransportLayer { closing = true; sslEngine.closeOutbound(); - if (!flush(netOutBuffer)) { + if (!flush(netWriteBuffer)) { throw new IOException("Remaining data in the network buffer, can't send SSL close message."); } //prep the buffer for the close message - netOutBuffer.clear(); + netWriteBuffer.clear(); //perform the close, since we called sslEngine.closeOutbound - SSLEngineResult handshake = sslEngine.wrap(emptyBuf, netOutBuffer); + SSLEngineResult handshake = sslEngine.wrap(emptyBuf, netWriteBuffer); //we should be in a close state if (handshake.getStatus() != SSLEngineResult.Status.CLOSED) { throw new IOException("Invalid close state, will not send network data."); } - netOutBuffer.flip(); - flush(netOutBuffer); + netWriteBuffer.flip(); + flush(netWriteBuffer); socketChannel.socket().close(); socketChannel.close(); - closed = !netOutBuffer.hasRemaining() && (handshake.getHandshakeStatus() != HandshakeStatus.NEED_WRAP); + closed = !netWriteBuffer.hasRemaining() && (handshake.getHandshakeStatus() != HandshakeStatus.NEED_WRAP); } public boolean 
isOpen() { @@ -299,37 +338,63 @@ public class SSLTransportLayer implements TransportLayer { * @param dst The buffer into which bytes are to be transferred * @return The number of bytes read, possible zero or -1 if the channel has reached end-of-stream * @throws IOException if some other I/O error occurs - * @throws IllegalStateException if handshake is not complete. */ public int read(ByteBuffer dst) throws IOException { if (closing || closed) return -1; - if (!handshakeComplete) throw new IllegalStateException("Handshake incomplete."); - netInBuffer = Utils.ensureCapacity(netInBuffer, packetBufferSize()); - int netread = socketChannel.read(netInBuffer); - if (netread == -1) return -1; int read = 0; - SSLEngineResult unwrap = null; - do { - netInBuffer.flip(); - unwrap = sslEngine.unwrap(netInBuffer, appReadBuffer); - //compact the buffer - netInBuffer.compact(); - if (unwrap.getStatus() == Status.OK || unwrap.getStatus() == Status.BUFFER_UNDERFLOW) { - read += unwrap.bytesProduced(); - // perform any task if needed - if (unwrap.getHandshakeStatus() == HandshakeStatus.NEED_TASK) tasks(); - //if we need more network data, than return for now. - if (unwrap.getStatus() == Status.BUFFER_UNDERFLOW) return readFromAppBuffer(dst); - } else if (unwrap.getStatus() == Status.BUFFER_OVERFLOW) { - appReadBuffer = Utils.ensureCapacity(appReadBuffer, applicationBufferSize()); - //empty out the dst buffer before we do another read - return readFromAppBuffer(dst); - } - } while(netInBuffer.position() != 0); - return readFromAppBuffer(dst); + //if we have unread decrypted data in appReadBuffer read that into dst buffer. + if (appReadBuffer.position() > 0) { + read = readFromAppBuffer(dst); + } + + if (dst.remaining() > 0) { + boolean canRead = true; + do { + netReadBuffer = Utils.ensureCapacity(netReadBuffer, packetBufferSize()); + if (canRead && netReadBuffer.remaining() > 0) { + int netread = socketChannel.read(netReadBuffer); + canRead = netread > 0; + } + netReadBuffer.flip(); + SSLEngineResult unwrap = sslEngine.unwrap(netReadBuffer, appReadBuffer); + netReadBuffer.compact(); + + // handle ssl renegotiation. 
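
The buffer juggling that the new read() relies on (drain any previously decrypted bytes out of appReadBuffer before unwrapping more) boils down to the transfer pattern below; this is a standalone restatement of the readFromAppBuffer helper that appears later in this file, shown here because the temporary limit manipulation is easy to misread, and the free-standing method name is only illustrative.

    // Copies as much of appReadBuffer as fits into dst, then compacts appReadBuffer so the next
    // unwrap can append to it. Narrowing the limit first avoids overflowing dst when it is smaller
    // than the decrypted data on hand.
    static int transfer(ByteBuffer appReadBuffer, ByteBuffer dst) {
        appReadBuffer.flip();
        int remaining = Math.min(appReadBuffer.remaining(), dst.remaining());
        if (remaining > 0) {
            int limit = appReadBuffer.limit();
            appReadBuffer.limit(appReadBuffer.position() + remaining);
            dst.put(appReadBuffer);
            appReadBuffer.limit(limit);
        }
        appReadBuffer.compact();
        return remaining;
    }
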
+ if (unwrap.getHandshakeStatus() != HandshakeStatus.NOT_HANDSHAKING) { + handshake(); + break; + } + + if (unwrap.getStatus() == Status.OK) { + read += readFromAppBuffer(dst); + } else if (unwrap.getStatus() == Status.BUFFER_OVERFLOW) { + int currentApplicationBufferSize = applicationBufferSize(); + appReadBuffer = Utils.ensureCapacity(appReadBuffer, currentApplicationBufferSize); + if (appReadBuffer.position() > 0) { + break; + } else if (appReadBuffer.position() >= currentApplicationBufferSize) { + throw new IllegalStateException("Buffer overflow when available data (" + appReadBuffer.position() + + ") > application buffer size (" + currentApplicationBufferSize + ")"); + } + } else if (unwrap.getStatus() == Status.BUFFER_UNDERFLOW) { + int currentPacketBufferSize = packetBufferSize(); + netReadBuffer = Utils.ensureCapacity(netReadBuffer, currentPacketBufferSize); + if (netReadBuffer.position() >= currentPacketBufferSize) { + throw new IllegalStateException("Buffer underflow when available data (" + netReadBuffer.position() + + ") > packet buffer size (" + currentPacketBufferSize + ")"); + } + if (!canRead) + break; + } else if (unwrap.getStatus() == Status.CLOSED) { + throw new EOFException(); + } + } while(netReadBuffer.position() != 0); + } + return read; } + public long read(ByteBuffer[] dsts) throws IOException { return read(dsts, 0, dsts.length); } @@ -352,23 +417,28 @@ public class SSLTransportLayer implements TransportLayer { * @return The number of bytes written, possibly zero * @throws IOException If some other I/O error occurs */ - public int write(ByteBuffer src) throws IOException { int written = 0; if (closing || closed) throw new IOException("Channel is in closing state"); - if (!flush(netOutBuffer)) + + if (!flush(netWriteBuffer)) return written; - netOutBuffer.clear(); - SSLEngineResult result = sslEngine.wrap(src, netOutBuffer); - written = result.bytesConsumed(); - netOutBuffer.flip(); - if (result.getStatus() == Status.OK) { - if (result.getHandshakeStatus() == HandshakeStatus.NEED_TASK) - tasks(); - } else { - throw new IOException(String.format("Unable to wrap data, invalid status %s", result.getStatus())); + netWriteBuffer.clear(); + SSLEngineResult wrap = sslEngine.wrap(src, netWriteBuffer); + netWriteBuffer.flip(); + if (wrap.getStatus() == Status.OK) { + written = wrap.bytesConsumed(); + flush(netWriteBuffer); + } else if (wrap.getStatus() == Status.BUFFER_OVERFLOW) { + int currentPacketBufferSize = packetBufferSize(); + netWriteBuffer = Utils.ensureCapacity(netReadBuffer, packetBufferSize()); + if (netWriteBuffer.position() > currentPacketBufferSize) + throw new IllegalStateException("SSL BUFFER_OVERFLOW when available data (" + netWriteBuffer.position() + ") > network buffer size (" + currentPacketBufferSize + ")"); + } else if (wrap.getStatus() == Status.BUFFER_UNDERFLOW) { + throw new IllegalStateException("SSL BUFFER_UNDERFLOW during write"); + } else if (wrap.getStatus() == Status.CLOSED) { + throw new EOFException(); } - flush(netOutBuffer); return written; } @@ -389,18 +459,31 @@ public class SSLTransportLayer implements TransportLayer { return write(srcs, 0, srcs.length); } + /** + * socket's InputStream as DataInputStream + * @return DataInputStream + */ public DataInputStream inStream() throws IOException { if (inStream == null) this.inStream = new DataInputStream(socketChannel.socket().getInputStream()); return inStream; } + + /** + * socket's OutputStream as DataOutputStream + * @return DataInputStream + */ public DataOutputStream outStream() throws 
IOException { if (outStream == null) this.outStream = new DataOutputStream(socketChannel.socket().getOutputStream()); return outStream; } + /** + * SSLSession's peerPrincipal for the remote host. + * @return Principal + */ public Principal peerPrincipal() throws IOException { try { return sslEngine.getSession().getPeerPrincipal(); @@ -409,29 +492,65 @@ public class SSLTransportLayer implements TransportLayer { } } - public SSLSession sslSession() throws IllegalStateException, UnsupportedOperationException { - if (!handshakeComplete) - throw new IllegalStateException("Handshake incomplete."); + /** + * Returns an SSL Session after the handshake is established + * throws IllegalStateException if the handshake is not established + */ + public SSLSession sslSession() throws IllegalStateException { return sslEngine.getSession(); } + /** + * Adds interestOps to the SelectionKey of the TransportLayer + * @param ops SelectionKey interestOps + */ + public void addInterestOps(int ops) { + interestOps |= ops; + // if handshake is not complete and key is cancelled. + // we should check for key.isValid. + if (handshakeComplete) + key.interestOps(interestOps); + else if (!key.isValid()) + throw new CancelledKeyException(); + } + + /** + * Removes interestOps from the SelectionKey of the TransportLayer + * @param ops SelectionKey interestOps + */ + public void removeInterestOps(int ops) { + interestOps &= ~ops; + // if handshake is not complete and key is cancelled. + // we should check for key.isValid. + if (handshakeComplete) + key.interestOps(interestOps); + else if (!key.isValid()) + throw new CancelledKeyException(); + } + + + /** + * Returns the delegatedTask for the SSLEngine. + */ + protected Runnable delegatedTask() { + return sslEngine.getDelegatedTask(); + } + + /** + * Transfers appReadBuffer contents (decrypted data) into the dst ByteBuffer + * @param dst ByteBuffer + */ private int readFromAppBuffer(ByteBuffer dst) { appReadBuffer.flip(); - try { - int remaining = appReadBuffer.remaining(); - if (remaining > 0) { - if (remaining > dst.remaining()) - remaining = dst.remaining(); - int i = 0; - while (i < remaining) { - dst.put(appReadBuffer.get()); - i++; - } - } - return remaining; - } finally { - appReadBuffer.compact(); + int remaining = Math.min(appReadBuffer.remaining(), dst.remaining()); + if (remaining > 0) { + int limit = appReadBuffer.limit(); + appReadBuffer.limit(appReadBuffer.position() + remaining); + dst.put(appReadBuffer); + appReadBuffer.limit(limit); } + appReadBuffer.compact(); + return remaining; } private int packetBufferSize() { diff --git a/clients/src/main/java/org/apache/kafka/common/network/Selector.java b/clients/src/main/java/org/apache/kafka/common/network/Selector.java index ea95858..024059e 100644 --- a/clients/src/main/java/org/apache/kafka/common/network/Selector.java +++ b/clients/src/main/java/org/apache/kafka/common/network/Selector.java @@ -29,8 +29,6 @@ import java.util.List; import java.util.Map; import java.util.Set; import java.util.concurrent.TimeUnit; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.Executors; import org.apache.kafka.common.KafkaException; import org.apache.kafka.common.metrics.Measurable; @@ -42,11 +40,7 @@ import org.apache.kafka.common.metrics.stats.Avg; import org.apache.kafka.common.metrics.stats.Count; import org.apache.kafka.common.metrics.stats.Max; import org.apache.kafka.common.metrics.stats.Rate; -import org.apache.kafka.common.protocol.SecurityProtocol; -import 
org.apache.kafka.common.security.auth.PrincipalBuilder; -import org.apache.kafka.common.security.auth.DefaultPrincipalBuilder; import org.apache.kafka.common.utils.Time; -import org.apache.kafka.clients.CommonClientConfigs; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -94,14 +88,13 @@ public class Selector implements Selectable { private final SelectorMetrics sensors; private final String metricGrpPrefix; private final Map metricTags; - private final SecurityProtocol securityProtocol; - private SSLFactory sslFactory = null; - private ExecutorService executorService = null; + private final ChannelBuilder channelBuilder; + /** * Create a new selector */ - public Selector(Metrics metrics, Time time, String metricGrpPrefix, Map metricTags, Map configs) { + public Selector(Metrics metrics, Time time, String metricGrpPrefix, Map metricTags, ChannelBuilder channelBuilder) { try { this.selector = java.nio.channels.Selector.open(); } catch (IOException e) { @@ -118,13 +111,7 @@ public class Selector implements Selectable { this.disconnected = new ArrayList(); this.failedSends = new ArrayList(); this.sensors = new SelectorMetrics(metrics); - this.securityProtocol = configs.containsKey(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG) ? - SecurityProtocol.valueOf((String) configs.get(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG)) : SecurityProtocol.PLAINTEXT; - if (securityProtocol == SecurityProtocol.SSL) { - this.executorService = Executors.newScheduledThreadPool(1); - this.sslFactory = new SSLFactory(SSLFactory.Mode.CLIENT); - this.sslFactory.configure(configs); - } + this.channelBuilder = channelBuilder; } /** @@ -162,20 +149,9 @@ public class Selector implements Selectable { throw e; } - TransportLayer transportLayer; - if (securityProtocol == SecurityProtocol.SSL) { - transportLayer = new SSLTransportLayer(socketChannel, - sslFactory.createSSLEngine(socket.getInetAddress().getHostName(), - socket.getPort()), - executorService); - } else { - transportLayer = new PlainTextTransportLayer(socketChannel); - } - PrincipalBuilder principalBuilder = new DefaultPrincipalBuilder(); - Authenticator authenticator = new DefaultAuthenticator(transportLayer, principalBuilder); - Channel channel = new Channel(transportLayer, authenticator); SelectionKey key = socketChannel.register(this.selector, SelectionKey.OP_CONNECT); key.attach(new Transmissions(id)); + Channel channel = channelBuilder.buildChannel(key); this.keys.put(id, key); this.channels.put(key, channel); } @@ -186,9 +162,9 @@ public class Selector implements Selectable { */ @Override public void disconnect(int id) { - SelectionKey key = this.keys.get(id); - if (key != null) - key.cancel(); + Channel channel = channelForId(id); + if (channel != null) + channel.disconnect(); } /** @@ -208,8 +184,6 @@ public class Selector implements Selectable { close(key); try { this.selector.close(); - if (this.executorService != null) - this.executorService.shutdown(); } catch (IOException e) { log.error("Exception closing selector:", e); } catch (SecurityException se) { @@ -223,12 +197,13 @@ public class Selector implements Selectable { */ public void send(NetworkSend send) { SelectionKey key = keyForId(send.destination()); + Channel channel = channel(key); Transmissions transmissions = transmissions(key); if (transmissions.hasSend()) throw new IllegalStateException("Attempt to begin a send operation with prior send operation still in progress."); transmissions.send = send; try { - key.interestOps(key.interestOps() | SelectionKey.OP_WRITE); + 
channel.addInterestOps(SelectionKey.OP_WRITE); } catch (CancelledKeyException e) { close(key); this.failedSends.add(send.destination()); @@ -275,52 +250,38 @@ public class Selector implements Selectable { /* complete any connections that have finished their handshake */ if (key.isConnectable()) { channel.finishConnect(); - key.interestOps(key.interestOps() & ~SelectionKey.OP_CONNECT | SelectionKey.OP_READ); this.connected.add(transmissions.id); this.sensors.connectionCreated.record(); - } - /* read from any connections that have readable data */ - if (key.isReadable()) { - if (!channel.isReady()) { - int status = channel.connect(key.isReadable(), key.isWritable()); - if (status == 0) - key.interestOps(key.interestOps() & ~SelectionKey.OP_CONNECT | SelectionKey.OP_READ); - else - key.interestOps(status); - } else { + if (!channel.isReady()) { + channel.connect(); + } else { + /* read from any connections that have readable data */ + if (key.isReadable()) { if (!transmissions.hasReceive()) transmissions.receive = new NetworkReceive(transmissions.id); - transmissions.receive.readFrom(channel); - if (transmissions.receive.complete()) { + while (transmissions.receive.readFrom(channel) > 0 && transmissions.receive.complete()) { transmissions.receive.payload().rewind(); this.completedReceives.add(transmissions.receive); this.sensors.recordBytesReceived(transmissions.id, transmissions.receive.payload().limit()); transmissions.clearReceive(); + if (!transmissions.hasReceive()) + transmissions.receive = new NetworkReceive(transmissions.id); } } - } - /* write to any sockets that have space in their buffer and for which we have data */ - if (key.isWritable()) { - if (!channel.isReady()) { - int status = channel.connect(key.isReadable(), key.isWritable()); - if (status == 0) - key.interestOps(key.interestOps() & ~SelectionKey.OP_CONNECT | SelectionKey.OP_READ); - else - key.interestOps(status); - } else { + /* write to any sockets that have space in their buffer and for which we have data */ + if (key.isWritable()) { transmissions.send.writeTo(channel); if (transmissions.send.remaining() <= 0) { this.completedSends.add(transmissions.send); this.sensors.recordBytesSent(transmissions.id, transmissions.send.size()); transmissions.clearSend(); - key.interestOps(key.interestOps() & ~SelectionKey.OP_WRITE); + channel.removeInterestOps(SelectionKey.OP_WRITE); } } } - /* cancel any defunct sockets */ if (!key.isValid()) { close(key); @@ -377,7 +338,8 @@ public class Selector implements Selectable { } private void mute(SelectionKey key) { - key.interestOps(key.interestOps() & ~SelectionKey.OP_READ); + Channel channel = channel(key); + channel.mute(); } @Override @@ -386,7 +348,8 @@ public class Selector implements Selectable { } private void unmute(SelectionKey key) { - key.interestOps(key.interestOps() | SelectionKey.OP_READ); + Channel channel = channel(key); + channel.unmute(); } @Override @@ -475,6 +438,10 @@ public class Selector implements Selectable { return this.channels.get(key); } + protected Channel channelForId(int id) { + return channel(keyForId(id)); + } + /** * The id and in-progress send and receive associated with a connection */ diff --git a/clients/src/main/java/org/apache/kafka/common/network/TransportLayer.java b/clients/src/main/java/org/apache/kafka/common/network/TransportLayer.java index 0531d6f..05750f4 100644 --- a/clients/src/main/java/org/apache/kafka/common/network/TransportLayer.java +++ b/clients/src/main/java/org/apache/kafka/common/network/TransportLayer.java @@ -27,7 +27,6 @@ 
import java.nio.channels.SocketChannel; import java.io.DataInputStream; import java.io.DataOutputStream; -import javax.net.ssl.SSLSession; import java.security.Principal; @@ -40,12 +39,6 @@ public interface TransportLayer { */ void close() throws IOException; - - /** - * Tells wheather or not this channel is open. - */ - boolean isOpen(); - /** * Writes a sequence of bytes to this channel from the given buffer. */ @@ -55,45 +48,61 @@ long write(ByteBuffer[] srcs, int offset, int length) throws IOException; + /** + * Reads sequence of bytes from the channel to the given buffer. + */ int read(ByteBuffer dst) throws IOException; long read(ByteBuffer[] dsts) throws IOException; long read(ByteBuffer[] dsts, int offset, int length) throws IOException; + + /** + * Returns true if the channel has handshake and authentication done. + */ boolean isReady(); - boolean finishConnect() throws IOException; + /** + * Returns true if socketChannel is open. + */ + boolean isOpen(); + + /** + * calls internal socketChannel.finishConnect() + */ + void finishConnect() throws IOException; + /** + * disconnect socketChannel + */ + void disconnect(); + + /** + * returns underlying socketChannel + */ SocketChannel socketChannel(); /** * Performs SSL handshake hence is a no-op for the non-secure * implementation - * @param read Unused in non-secure implementation - * @param write Unused in non-secure implementation - * @return Always return 0 * @throws IOException */ - int handshake(boolean read, boolean write) throws IOException; + void handshake() throws IOException; + DataInputStream inStream() throws IOException; DataOutputStream outStream() throws IOException; - boolean flush(ByteBuffer buffer) throws IOException; - - /** * returns SSLSession.getPeerPrinicpal if SSLTransportLayer used * for non-secure returns a "ANONYMOUS" as the peerPrincipal */ Principal peerPrincipal() throws IOException; - /** - * returns a SSL Session after the handshake is established - * throws IlleagalStateException if the handshake is not established - * throws UnsupportedOperationException for non-secure implementation - */ - SSLSession sslSession() throws IllegalStateException, UnsupportedOperationException; + void addInterestOps(int ops); + + void removeInterestOps(int ops); + } diff --git a/clients/src/main/java/org/apache/kafka/common/protocol/SecurityProtocol.java b/clients/src/main/java/org/apache/kafka/common/protocol/SecurityProtocol.java index d663f7a..a624741 100644 --- a/clients/src/main/java/org/apache/kafka/common/protocol/SecurityProtocol.java +++ b/clients/src/main/java/org/apache/kafka/common/protocol/SecurityProtocol.java @@ -24,8 +24,8 @@ import java.util.Map; public enum SecurityProtocol { /** Un-authenticated, non-encrypted channel */ PLAINTEXT(0, "PLAINTEXT"), - /** SSL channe */ - SSL(1, "PLAINTEXT"), + /** SSL channel */ + SSL(1, "SSL"), /** Currently identical to PLAINTEXT and used for testing only. We may implement extra instrumentation when testing channel code. 
*/ TRACE(Short.MAX_VALUE, "TRACE"); diff --git a/clients/src/main/java/org/apache/kafka/common/security/auth/DefaultPrincipalBuilder.java b/clients/src/main/java/org/apache/kafka/common/security/auth/DefaultPrincipalBuilder.java index d594e4d..fbbeb9e 100644 --- a/clients/src/main/java/org/apache/kafka/common/security/auth/DefaultPrincipalBuilder.java +++ b/clients/src/main/java/org/apache/kafka/common/security/auth/DefaultPrincipalBuilder.java @@ -17,15 +17,19 @@ package org.apache.kafka.common.security.auth; +import java.util.Map; import java.security.Principal; import org.apache.kafka.common.network.TransportLayer; import org.apache.kafka.common.network.Authenticator; import org.apache.kafka.common.KafkaException; +/** DefaultPrincipalBuilder which returns transportLayer's peer Principal **/ public class DefaultPrincipalBuilder implements PrincipalBuilder { + public void configure(Map configs) {} + public Principal buildPrincipal(TransportLayer transportLayer, Authenticator authenticator) throws KafkaException { try { return transportLayer.peerPrincipal(); @@ -34,4 +38,6 @@ public class DefaultPrincipalBuilder implements PrincipalBuilder { } } + public void close() throws KafkaException {} + } diff --git a/clients/src/main/java/org/apache/kafka/common/security/auth/KafkaPrincipal.java b/clients/src/main/java/org/apache/kafka/common/security/auth/KafkaPrincipal.java index 4a7ace8..277b6ef 100644 --- a/clients/src/main/java/org/apache/kafka/common/security/auth/KafkaPrincipal.java +++ b/clients/src/main/java/org/apache/kafka/common/security/auth/KafkaPrincipal.java @@ -28,6 +28,7 @@ public class KafkaPrincipal implements Principal { this.name = name; } + @Override public boolean equals(Object object) { if (this == object) return true; @@ -39,14 +40,17 @@ public class KafkaPrincipal implements Principal { return false; } + @Override public int hashCode() { return name.hashCode(); } + @Override public String getName() { return name; } + @Override public String toString() { return name; } diff --git a/clients/src/main/java/org/apache/kafka/common/security/auth/PrincipalBuilder.java b/clients/src/main/java/org/apache/kafka/common/security/auth/PrincipalBuilder.java index 5b39222..b7cc378 100644 --- a/clients/src/main/java/org/apache/kafka/common/security/auth/PrincipalBuilder.java +++ b/clients/src/main/java/org/apache/kafka/common/security/auth/PrincipalBuilder.java @@ -20,13 +20,21 @@ package org.apache.kafka.common.security.auth; /* * PrincipalBuilder for Authenticator */ + import org.apache.kafka.common.network.TransportLayer; import org.apache.kafka.common.network.Authenticator; import org.apache.kafka.common.KafkaException; +import org.apache.kafka.common.Configurable; +import java.util.Map; import java.security.Principal; -public interface PrincipalBuilder { +public interface PrincipalBuilder extends Configurable { + + /** + * configure this class with the given key-value pairs + */ + public void configure(Map configs); /** * Returns Principal @@ -35,4 +43,9 @@ public interface PrincipalBuilder { */ Principal buildPrincipal(TransportLayer transportLayer, Authenticator authenticator) throws KafkaException; + /** + * Close this PrincipalBuilder + */ + public void close() throws KafkaException; + } diff --git a/clients/src/main/java/org/apache/kafka/common/utils/Utils.java b/clients/src/main/java/org/apache/kafka/common/utils/Utils.java index 9382060..5b21eac 100755 --- a/clients/src/main/java/org/apache/kafka/common/utils/Utils.java +++ 
b/clients/src/main/java/org/apache/kafka/common/utils/Utils.java @@ -3,9 +3,9 @@ * file distributed with this work for additional information regarding copyright ownership. The ASF licenses this file * to You under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the * License. You may obtain a copy of the License at - * + * * http://www.apache.org/licenses/LICENSE-2.0 - * + * * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the * specific language governing permissions and limitations under the License. @@ -48,7 +48,7 @@ public class Utils { /** * Turn the given UTF8 byte array into a string - * + * * @param bytes The byte array * @return The string */ @@ -62,7 +62,7 @@ public class Utils { /** * Turn a string into a utf8 byte[] - * + * * @param string The string * @return The byte[] */ @@ -76,7 +76,7 @@ public class Utils { /** * Read an unsigned integer from the current position in the buffer, incrementing the position by 4 bytes - * + * * @param buffer The buffer to read from * @return The integer read, as a long to avoid signedness */ @@ -86,7 +86,7 @@ public class Utils { /** * Read an unsigned integer from the given position without modifying the buffers position - * + * * @param buffer the buffer to read from * @param index the index from which to read the integer * @return The integer read, as a long to avoid signedness @@ -97,12 +97,12 @@ public class Utils { /** * Read an unsigned integer stored in little-endian format from the {@link InputStream}. - * + * * @param in The stream to read from * @return The integer read (MUST BE TREATED WITH SPECIAL CARE TO AVOID SIGNEDNESS) */ public static int readUnsignedIntLE(InputStream in) throws IOException { - return (in.read() << 8 * 0) + return (in.read() << 8 * 0) | (in.read() << 8 * 1) | (in.read() << 8 * 2) | (in.read() << 8 * 3); @@ -111,7 +111,7 @@ public class Utils { /** * Read an unsigned integer stored in little-endian format from a byte array * at a given offset. - * + * * @param buffer The byte array to read from * @param offset The position in buffer to read from * @return The integer read (MUST BE TREATED WITH SPECIAL CARE TO AVOID SIGNEDNESS) @@ -125,7 +125,7 @@ public class Utils { /** * Write the given long value as a 4 byte unsigned integer. Overflow is ignored. - * + * * @param buffer The buffer to write to * @param value The value to write */ @@ -135,7 +135,7 @@ public class Utils { /** * Write the given long value as a 4 byte unsigned integer. Overflow is ignored. - * + * * @param buffer The buffer to write to * @param index The position in the buffer at which to begin writing * @param value The value to write @@ -146,7 +146,7 @@ public class Utils { /** * Write an unsigned integer in little-endian format to the {@link OutputStream}. - * + * * @param out The stream to write to * @param value The value to write */ @@ -160,7 +160,7 @@ public class Utils { /** * Write an unsigned integer in little-endian format to a byte array * at a given offset. 
- * + * * @param buffer The byte array to write to * @param offset The position in buffer to write to * @param value The value to write @@ -183,7 +183,7 @@ public class Utils { /** * Get the length for UTF8-encoding a string without encoding it first - * + * * @param s The string to calculate the length for * @return The length when serialized */ @@ -229,7 +229,7 @@ public class Utils { /** * Check that the parameter t is not null - * + * * @param t The object to check * @return t if it isn't null * @throws NullPointerException if t is null. @@ -353,7 +353,7 @@ public class Utils { public static String join(T[] strs, String seperator) { return join(Arrays.asList(strs), seperator); } - + /** * Create a string representation of a list joined by the given separator * @param list The list of items @@ -366,7 +366,7 @@ public class Utils { while (iter.hasNext()) { sb.append(iter.next()); if (iter.hasNext()) - sb.append(seperator); + sb.append(seperator); } return sb.toString(); } @@ -460,7 +460,7 @@ public class Utils { /** * Attempt to read a file as a string - * @throws IOException + * @throws IOException */ public static String readFileAsString(String path, Charset charset) throws IOException { if (charset == null) charset = Charset.defaultCharset(); @@ -480,6 +480,12 @@ public class Utils { return Utils.readFileAsString(path, Charset.defaultCharset()); } + /** + * Check if the given ByteBuffer capacity + * @param existingBuffer ByteBuffer capacity to check + * @param newLength new length for the ByteBuffer. + * returns ByteBuffer + */ public static ByteBuffer ensureCapacity(ByteBuffer existingBuffer, int newLength) { if (newLength > existingBuffer.capacity()) { ByteBuffer newBuffer = ByteBuffer.allocate(newLength); diff --git a/clients/src/test/java/org/apache/kafka/common/network/EchoServer.java b/clients/src/test/java/org/apache/kafka/common/network/EchoServer.java index 7f34738..ce4c201 100644 --- a/clients/src/test/java/org/apache/kafka/common/network/EchoServer.java +++ b/clients/src/test/java/org/apache/kafka/common/network/EchoServer.java @@ -12,9 +12,11 @@ */ package org.apache.kafka.common.network; -import org.apache.kafka.clients.CommonClientConfigs; +import org.apache.kafka.common.config.SecurityConfigs; import org.apache.kafka.common.protocol.SecurityProtocol; +import javax.net.ssl.SSLContext; +import javax.net.ssl.SSLSocket; import java.io.DataInputStream; import java.io.DataOutputStream; import java.io.IOException; @@ -24,7 +26,7 @@ import java.util.ArrayList; import java.util.Collections; import java.util.Map; import java.util.List; - +import java.util.concurrent.atomic.AtomicBoolean; /** @@ -37,14 +39,16 @@ class EchoServer extends Thread { private final List sockets; private SecurityProtocol protocol = SecurityProtocol.PLAINTEXT; private SSLFactory sslFactory; + private final AtomicBoolean renegotiate = new AtomicBoolean(); public EchoServer(Map configs) throws Exception { - this.protocol = configs.containsKey(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG) ? - SecurityProtocol.valueOf((String) configs.get(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG)) : SecurityProtocol.PLAINTEXT; + this.protocol = configs.containsKey(SecurityConfigs.SECURITY_PROTOCOL_CONFIG) ? 
+ SecurityProtocol.valueOf((String) configs.get(SecurityConfigs.SECURITY_PROTOCOL_CONFIG)) : SecurityProtocol.PLAINTEXT; if (protocol == SecurityProtocol.SSL) { this.sslFactory = new SSLFactory(SSLFactory.Mode.SERVER); this.sslFactory.configure(configs); - this.serverSocket = sslFactory.createSSLServerSocketFactory().createServerSocket(0); + SSLContext sslContext = this.sslFactory.sslContext(); + this.serverSocket = sslContext.getServerSocketFactory().createServerSocket(0); } else { this.serverSocket = new ServerSocket(0); } @@ -53,6 +57,9 @@ class EchoServer extends Thread { this.sockets = Collections.synchronizedList(new ArrayList()); } + public void renegotiate() { + renegotiate.set(true); + } @Override public void run() { @@ -68,6 +75,10 @@ class EchoServer extends Thread { DataOutputStream output = new DataOutputStream(socket.getOutputStream()); while (socket.isConnected() && !socket.isClosed()) { int size = input.readInt(); + if (renegotiate.get()) { + renegotiate.set(false); + ((SSLSocket) socket).startHandshake(); + } byte[] bytes = new byte[size]; input.readFully(bytes); output.writeInt(size); diff --git a/clients/src/test/java/org/apache/kafka/common/network/SSLFactoryTest.java b/clients/src/test/java/org/apache/kafka/common/network/SSLFactoryTest.java index 02a3eff..9e3926c 100644 --- a/clients/src/test/java/org/apache/kafka/common/network/SSLFactoryTest.java +++ b/clients/src/test/java/org/apache/kafka/common/network/SSLFactoryTest.java @@ -21,6 +21,7 @@ import org.apache.kafka.test.TestSSLUtils; import org.junit.Test; import static org.junit.Assert.assertNotNull; import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertArrayEquals; import static org.junit.Assert.assertTrue; @@ -32,24 +33,26 @@ public class SSLFactoryTest { @Test public void testSSLFactoryConfiguration() throws Exception { - Map> sslConfigs = TestSSLUtils.createSSLConfigs(false, true); - Map serverSSLConfig = sslConfigs.get(SSLFactory.Mode.SERVER); + Map> sslConfigs = TestSSLUtils.createSSLConfigs(false, true); + Map serverSSLConfig = sslConfigs.get(SSLFactory.Mode.SERVER); SSLFactory sslFactory = new SSLFactory(SSLFactory.Mode.SERVER); sslFactory.configure(serverSSLConfig); - SSLEngine engine = sslFactory.createSSLEngine("localhost", 9093); + //host and port are hints + SSLEngine engine = sslFactory.createSSLEngine("localhost", 0); assertNotNull(engine); String[] expectedProtocols = {"TLSv1.2"}; - assertEquals(expectedProtocols, engine.getEnabledProtocols()); + assertArrayEquals(expectedProtocols, engine.getEnabledProtocols()); assertEquals(false, engine.getUseClientMode()); } @Test public void testClientMode() throws Exception { - Map> sslConfigs = TestSSLUtils.createSSLConfigs(false, true); - Map clientSSLConfig = sslConfigs.get(SSLFactory.Mode.CLIENT); + Map> sslConfigs = TestSSLUtils.createSSLConfigs(false, true); + Map clientSSLConfig = sslConfigs.get(SSLFactory.Mode.CLIENT); SSLFactory sslFactory = new SSLFactory(SSLFactory.Mode.CLIENT); sslFactory.configure(clientSSLConfig); - SSLEngine engine = sslFactory.createSSLEngine("localhost", 9093); + //host and port are hints + SSLEngine engine = sslFactory.createSSLEngine("localhost", 0); assertTrue(engine.getUseClientMode()); } diff --git a/clients/src/test/java/org/apache/kafka/common/network/SSLSelectorTest.java b/clients/src/test/java/org/apache/kafka/common/network/SSLSelectorTest.java index c5f8ecf..ab9e6b9 100644 --- a/clients/src/test/java/org/apache/kafka/common/network/SSLSelectorTest.java +++ 
b/clients/src/test/java/org/apache/kafka/common/network/SSLSelectorTest.java @@ -13,17 +13,29 @@ package org.apache.kafka.common.network; import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; +import java.util.LinkedHashMap; import java.util.Map; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.Semaphore; + import java.io.IOException; import java.net.InetSocketAddress; import java.nio.ByteBuffer; -import java.util.LinkedHashMap; +import java.nio.channels.SocketChannel; +import java.nio.channels.SelectionKey; +import javax.net.ssl.SSLEngine; +import javax.net.ssl.SSLEngineResult; +import org.apache.kafka.common.config.SecurityConfigs; +import org.apache.kafka.common.KafkaException; import org.apache.kafka.common.metrics.Metrics; +import org.apache.kafka.common.security.auth.PrincipalBuilder; import org.apache.kafka.common.utils.MockTime; -import org.apache.kafka.test.TestSSLUtils; import org.apache.kafka.common.utils.Utils; +import org.apache.kafka.test.TestSSLUtils; import org.apache.kafka.test.TestUtils; import org.junit.After; import org.junit.Before; @@ -38,14 +50,22 @@ public class SSLSelectorTest { private static final int BUFFER_SIZE = 4 * 1024; private EchoServer server; - private Selectable selector; + private Selector selector; + private ChannelBuilder channelBuilder; @Before public void setup() throws Exception { - Map> sslConfigs = TestSSLUtils.createSSLConfigs(false, true); - this.server = new EchoServer(sslConfigs.get(SSLFactory.Mode.SERVER)); + Map> sslConfigs = TestSSLUtils.createSSLConfigs(false, true); + Map sslServerConfigs = sslConfigs.get(SSLFactory.Mode.SERVER); + sslServerConfigs.put(SecurityConfigs.PRINCIPAL_BUILDER_CLASS_CONFIG, Class.forName(SecurityConfigs.DEFAULT_PRINCIPAL_BUILDER_CLASS)); + this.server = new EchoServer(sslServerConfigs); this.server.start(); - this.selector = new Selector(new Metrics(), new MockTime(), "MetricGroup", new LinkedHashMap(), sslConfigs.get(SSLFactory.Mode.CLIENT)); + Map sslClientConfigs = sslConfigs.get(SSLFactory.Mode.CLIENT); + sslClientConfigs.put(SecurityConfigs.PRINCIPAL_BUILDER_CLASS_CONFIG, Class.forName(SecurityConfigs.DEFAULT_PRINCIPAL_BUILDER_CLASS)); + + this.channelBuilder = new MockSSLChannelBuilder(); + this.channelBuilder.configure(sslClientConfigs); + this.selector = new Selector(new Metrics(), new MockTime(), "MetricGroup", new LinkedHashMap(), channelBuilder); } @After @@ -88,6 +108,23 @@ public class SSLSelectorTest { } + /** + * Validate that the client can intentionally disconnect and reconnect + */ + @Test + public void testClientDisconnect() throws Exception { + int node = 0; + blockingConnect(node); + selector.disconnect(node); + selector.send(createSend(node, "hello1")); + selector.poll(10L); + assertEquals("Request should not have succeeded", 0, selector.completedSends().size()); + assertEquals("There should be a disconnect", 1, selector.disconnected().size()); + assertTrue("The disconnect should be from our node", selector.disconnected().contains(node)); + blockingConnect(node); + assertEquals("hello2", blockingRequest(node, "hello2")); + } + /** * Tests wrap BUFFER_OVERFLOW and unwrap BUFFER_UNDERFLOW * @throws Exception @@ -103,10 +140,128 @@ public class SSLSelectorTest { sendAndReceive(node, requestPrefix, 0, reqs); } + /** + * Test sending an empty string + */ + @Test + public void testEmptyRequest() throws Exception { + int node = 0; + blockingConnect(node); + assertEquals("", blockingRequest(node, 
"")); + } + + @Test + public void testMute() throws Exception { + blockingConnect(0); + blockingConnect(1); + + selector.send(createSend(0, "hello")); + selector.send(createSend(1, "hi")); + selector.mute(1); + + while (selector.completedReceives().isEmpty()) + selector.poll(5); + assertEquals("We should have only one response", 1, selector.completedReceives().size()); + assertEquals("The response should not be from the muted node", 0, selector.completedReceives().get(0).source()); + selector.unmute(1); + do { + selector.poll(5); + } while (selector.completedReceives().isEmpty()); + assertEquals("We should have only one response", 1, selector.completedReceives().size()); + assertEquals("The response should be from the previously muted node", 1, selector.completedReceives().get(0).source()); + } + + /** + * Tests that SSL renegotiation initiated by the server are handled correctly by the client + * @throws Exception + */ + @Test + public void testRenegotiation() throws Exception { + int reqs = 500; + int node = 0; + + // create connections + InetSocketAddress addr = new InetSocketAddress("localhost", server.port); + selector.connect(node, addr, BUFFER_SIZE, BUFFER_SIZE); + + // send echo requests and receive responses + int requests = 0; + int responses = 0; + int renegotiates = 0; + selector.send(createSend(node, node + "-" + 0)); + requests++; + + // loop until we complete all requests + while (responses < reqs) { + selector.poll(0L); + if (responses >= 100 && renegotiates == 0) { + renegotiates++; + server.renegotiate(); + } + assertEquals("No disconnects should have occurred.", 0, selector.disconnected().size()); + + // handle any responses we may have gotten + for (NetworkReceive receive : selector.completedReceives()) { + String[] pieces = asString(receive).split("-"); + assertEquals("Receive text should be in the form 'conn-counter'", 2, pieces.length); + assertEquals("Check the source", receive.source(), Integer.parseInt(pieces[0])); + assertEquals("Receive ByteBuffer position should be at 0", 0, receive.payload().position()); + assertEquals("Check the request counter", responses, Integer.parseInt(pieces[1])); + responses++; + } + + // prepare new sends for the next round + for (int i = 0; i < selector.completedSends().size() && requests < reqs; i++, requests++) { + selector.send(createSend(node, node + "-" + requests)); + } + } + } + + @Test + public void testLongDeferredTasks() throws Exception { + final int fastNode = 0; + final int slowNode = 1; + + // create connections + InetSocketAddress addr = new InetSocketAddress("localhost", server.port); + selector.connect(fastNode, addr, BUFFER_SIZE, BUFFER_SIZE); + selector.connect(slowNode, addr, BUFFER_SIZE, BUFFER_SIZE); + + sendAndReceive(fastNode, String.valueOf(fastNode), 0, 10); + sendAndReceive(slowNode, String.valueOf(slowNode), 0, 10); + + Semaphore delegatedTaskSemaphore = new Semaphore(0); + Channel channel = selector.channelForId(slowNode); + MockSSLTransportLayer sslTransportLayer = (MockSSLTransportLayer) channel.transportLayer(); + + sslTransportLayer.delegatedTaskSemaphore = delegatedTaskSemaphore; + // set renegotiate flag and send a message to trigger renegotiation on the slow channel + server.renegotiate(); + selector.send(createSend(slowNode, String.valueOf(slowNode) + "-" + 11)); + while (sslTransportLayer.engine.getHandshakeStatus() != SSLEngineResult.HandshakeStatus.NEED_TASK) { + selector.poll(1L); + } + + // Slow channel is now blocked on the delegated task. 
Check that fast channel is able to make progress + sendAndReceive(fastNode, String.valueOf(fastNode), 10, 20); + + // Allow slow channel to continue and check that it works as expected + delegatedTaskSemaphore.release(10); + selector.send(createSend(slowNode, String.valueOf(slowNode) + "-" + 12)); + int responses = 11; + while (responses <= 12) { + selector.poll(0L); + for (NetworkReceive receive : selector.completedReceives()) { + assertEquals(slowNode + "-" + responses, asString(receive)); + responses++; + } + } + } + + private String blockingRequest(int node, String s) throws IOException { selector.send(createSend(node, s)); - selector.poll(1000L); while (true) { selector.poll(1000L); for (NetworkReceive receive : selector.completedReceives()) @@ -152,4 +307,75 @@ public class SSLSelectorTest { } } } + + // Channel builder with MockSSLTransportLayer. + private static class MockSSLChannelBuilder implements ChannelBuilder { + private SSLFactory sslFactory; + private ExecutorService executorService; + private PrincipalBuilder principalBuilder; + + public void configure(Map configs) throws KafkaException { + try { + this.executorService = Executors.newScheduledThreadPool(1); + this.sslFactory = new SSLFactory(SSLFactory.Mode.CLIENT); + this.sslFactory.configure(configs); + this.principalBuilder = (PrincipalBuilder) Utils.newInstance((Class) configs.get(SecurityConfigs.PRINCIPAL_BUILDER_CLASS_CONFIG)); + this.principalBuilder.configure(configs); + } catch (Exception e) { + throw new KafkaException(e); + } + } + + + @Override + public Channel buildChannel(SelectionKey key) throws KafkaException { + Channel channel = null; + try { + SocketChannel socketChannel = (SocketChannel) key.channel(); + MockSSLTransportLayer transportLayer = new MockSSLTransportLayer(key, + sslFactory.createSSLEngine(socketChannel.socket().getInetAddress().getHostName(), + socketChannel.socket().getPort()), + executorService); + Authenticator authenticator = new DefaultAuthenticator(transportLayer, this.principalBuilder); + channel = new Channel(transportLayer, authenticator); + } catch (Exception e) { + throw new KafkaException(e); + } + return channel; + } + + public void close() { + this.executorService.shutdown(); + this.principalBuilder.close(); + } + } + + private static class MockSSLTransportLayer extends SSLTransportLayer { + private final SSLEngine engine; + private boolean engineClosed; + private Semaphore delegatedTaskSemaphore; + + public MockSSLTransportLayer(SelectionKey key, SSLEngine engine, ExecutorService executorService) throws IOException { + super(key, engine, executorService); + this.engine = engine; + } + + @Override + protected Runnable delegatedTask() { + final Runnable task = super.delegatedTask(); + return task == null ? 
null : new Runnable() { + @Override + public void run() { + if (delegatedTaskSemaphore != null) { + try { + delegatedTaskSemaphore.acquire(); + } catch (InterruptedException e) { + } + } + task.run(); + } + }; + } + } + } diff --git a/clients/src/test/java/org/apache/kafka/common/network/SelectorTest.java b/clients/src/test/java/org/apache/kafka/common/network/SelectorTest.java index 3fd8fe2..4bb95f0 100644 --- a/clients/src/test/java/org/apache/kafka/common/network/SelectorTest.java +++ b/clients/src/test/java/org/apache/kafka/common/network/SelectorTest.java @@ -24,6 +24,7 @@ import java.nio.ByteBuffer; import java.util.LinkedHashMap; import org.apache.kafka.common.metrics.Metrics; +import org.apache.kafka.common.config.SecurityConfigs; import org.apache.kafka.common.utils.MockTime; import org.apache.kafka.common.utils.Utils; import org.apache.kafka.test.TestUtils; @@ -40,13 +41,17 @@ public class SelectorTest { private EchoServer server; private Selectable selector; + private ChannelBuilder channelBuilder; @Before public void setup() throws Exception { - Map configs = new HashMap(); + Map configs = new HashMap(); + configs.put(SecurityConfigs.PRINCIPAL_BUILDER_CLASS_CONFIG, Class.forName(SecurityConfigs.DEFAULT_PRINCIPAL_BUILDER_CLASS)); this.server = new EchoServer(configs); this.server.start(); - this.selector = new Selector(new Metrics(), new MockTime() , "MetricGroup", new LinkedHashMap(), configs); + this.channelBuilder = new PlainTextChannelBuilder(); + this.channelBuilder.configure(configs); + this.selector = new Selector(new Metrics(), new MockTime() , "MetricGroup", new LinkedHashMap(), channelBuilder); } @After @@ -196,6 +201,17 @@ public class SelectorTest { assertEquals(big, blockingRequest(node, big)); } + @Test + public void testShortMessageSequence() throws Exception { + int bufferSize = 512 * 1024; + int node = 0; + int reqs = 50; + InetSocketAddress addr = new InetSocketAddress("localhost", server.port); + selector.connect(node, addr, BUFFER_SIZE, BUFFER_SIZE); + String requestPrefix = "hello-wordl"; + sendAndReceive(node, requestPrefix, 0, reqs); + } + /** * Test sending an empty string */ @@ -261,5 +277,27 @@ public class SelectorTest { return new String(Utils.toArray(receive.payload())); } + private void sendAndReceive(int node, String requestPrefix, int startIndex, int endIndex) throws Exception { + int requests = startIndex; + int responses = startIndex; + selector.send(createSend(node, requestPrefix + "-" + startIndex)); + requests++; + while (responses < endIndex) { + // do the i/o + selector.poll(0L); + assertEquals("No disconnects should have occurred.", 0, selector.disconnected().size()); + + // handle requests and responses of the fast node + for (NetworkReceive receive : selector.completedReceives()) { + assertEquals(requestPrefix + "-" + responses, asString(receive)); + responses++; + } + + for (int i = 0; i < selector.completedSends().size() && requests < endIndex; i++, requests++) { + selector.send(createSend(node, requestPrefix + "-" + requests)); + } + } + } + } diff --git a/clients/src/test/java/org/apache/kafka/test/TestSSLUtils.java b/clients/src/test/java/org/apache/kafka/test/TestSSLUtils.java index 590f1f5..b34d085 100644 --- a/clients/src/test/java/org/apache/kafka/test/TestSSLUtils.java +++ b/clients/src/test/java/org/apache/kafka/test/TestSSLUtils.java @@ -17,28 +17,36 @@ package org.apache.kafka.test; -import org.apache.kafka.clients.CommonClientConfigs; +import org.apache.kafka.common.config.SecurityConfigs; import 
org.apache.kafka.common.network.SSLFactory; import java.io.File; import java.io.FileOutputStream; import java.io.IOException; import java.math.BigInteger; -import java.security.GeneralSecurityException; -import java.security.Key; -import java.security.KeyPair; -import java.security.KeyPairGenerator; -import java.security.KeyStore; -import java.security.NoSuchAlgorithmException; -import java.security.SecureRandom; +import javax.net.ssl.TrustManagerFactory; +import java.security.*; import java.security.cert.Certificate; +import java.security.cert.CertificateException; import java.security.cert.X509Certificate; -import java.security.InvalidKeyException; -import java.security.NoSuchProviderException; -import java.security.SignatureException; -import java.security.cert.CertificateEncodingException; -import javax.security.auth.x500.X500Principal; -import org.bouncycastle.x509.X509V1CertificateGenerator; + +import org.bouncycastle.asn1.x500.X500Name; +import org.bouncycastle.asn1.x509.AlgorithmIdentifier; +import org.bouncycastle.asn1.x509.SubjectPublicKeyInfo; +import org.bouncycastle.cert.X509CertificateHolder; +import org.bouncycastle.cert.X509v1CertificateBuilder; +import org.bouncycastle.cert.jcajce.JcaX509CertificateConverter; +import org.bouncycastle.crypto.params.AsymmetricKeyParameter; +import org.bouncycastle.crypto.util.PrivateKeyFactory; +import org.bouncycastle.jce.provider.BouncyCastleProvider; +import org.bouncycastle.operator.ContentSigner; +import org.bouncycastle.operator.DefaultDigestAlgorithmIdentifierFinder; +import org.bouncycastle.operator.DefaultSignatureAlgorithmIdentifierFinder; +import org.bouncycastle.operator.bc.BcRSAContentSignerBuilder; + + + + import java.util.Date; import java.util.HashMap; @@ -63,24 +71,28 @@ public class TestSSLUtils { */ public static X509Certificate generateCertificate(String dn, KeyPair pair, int days, String algorithm) - throws CertificateEncodingException, InvalidKeyException, IllegalStateException, - NoSuchProviderException, NoSuchAlgorithmException, SignatureException { - Date from = new Date(); - Date to = new Date(from.getTime() + days * 86400000L); - BigInteger sn = new BigInteger(64, new SecureRandom()); - KeyPair keyPair = pair; - X509V1CertificateGenerator certGen = new X509V1CertificateGenerator(); - X500Principal dnName = new X500Principal(dn); - - certGen.setSerialNumber(sn); - certGen.setIssuerDN(dnName); - certGen.setNotBefore(from); - certGen.setNotAfter(to); - certGen.setSubjectDN(dnName); - certGen.setPublicKey(keyPair.getPublic()); - certGen.setSignatureAlgorithm(algorithm); - X509Certificate cert = certGen.generate(pair.getPrivate()); - return cert; + throws CertificateException { + + try { + Security.addProvider(new BouncyCastleProvider()); + AlgorithmIdentifier sigAlgId = new DefaultSignatureAlgorithmIdentifierFinder().find(algorithm); + AlgorithmIdentifier digAlgId = new DefaultDigestAlgorithmIdentifierFinder().find(sigAlgId); + AsymmetricKeyParameter privateKeyAsymKeyParam = PrivateKeyFactory.createKey(pair.getPrivate().getEncoded()); + SubjectPublicKeyInfo subPubKeyInfo = SubjectPublicKeyInfo.getInstance(pair.getPublic().getEncoded()); + ContentSigner sigGen = new BcRSAContentSignerBuilder(sigAlgId, digAlgId).build(privateKeyAsymKeyParam); + X500Name name = new X500Name(dn); + Date from = new Date(); + Date to = new Date(from.getTime() + days * 86400000L); + BigInteger sn = new BigInteger(64, new SecureRandom()); + + X509v1CertificateBuilder v1CertGen = new X509v1CertificateBuilder(name, sn, from, to, name, 
subPubKeyInfo); + X509CertificateHolder certificateHolder = v1CertGen.build(sigGen); + return new JcaX509CertificateConverter().setProvider("BC").getCertificate(certificateHolder); + } catch (CertificateException ce) { + throw ce; + } catch (Exception e) { + throw new CertificateException(e); + } } public static KeyPair generateKeyPair(String algorithm) throws NoSuchAlgorithmException { @@ -163,33 +175,33 @@ public class TestSSLUtils { public static Map createSSLConfig(SSLFactory.Mode mode, File keyStoreFile, String password, String keyPassword, File trustStoreFile, String trustStorePassword, boolean useClientCert) { Map sslConfigs = new HashMap(); - sslConfigs.put(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG, "SSL"); // kafka security protocol - sslConfigs.put(CommonClientConfigs.SSL_PROTOCOL_CONFIG, "TLS"); // protocol to create SSLContext + sslConfigs.put(SecurityConfigs.SECURITY_PROTOCOL_CONFIG, "SSL"); // kafka security protocol + sslConfigs.put(SecurityConfigs.SSL_PROTOCOL_CONFIG, "TLSv1.2"); // protocol to create SSLContext if (mode == SSLFactory.Mode.SERVER || (mode == SSLFactory.Mode.CLIENT && keyStoreFile != null)) { - sslConfigs.put(CommonClientConfigs.SSL_KEYSTORE_LOCATION_CONFIG, keyStoreFile.getPath()); - sslConfigs.put(CommonClientConfigs.SSL_KEYSTORE_TYPE_CONFIG, "JKS"); - sslConfigs.put(CommonClientConfigs.SSL_KEYMANAGER_ALGORITHM_CONFIG, "SunX509"); - sslConfigs.put(CommonClientConfigs.SSL_KEYSTORE_PASSWORD_CONFIG, password); - sslConfigs.put(CommonClientConfigs.SSL_KEY_PASSWORD_CONFIG, keyPassword); + sslConfigs.put(SecurityConfigs.SSL_KEYSTORE_LOCATION_CONFIG, keyStoreFile.getPath()); + sslConfigs.put(SecurityConfigs.SSL_KEYSTORE_TYPE_CONFIG, "JKS"); + sslConfigs.put(SecurityConfigs.SSL_KEYMANAGER_ALGORITHM_CONFIG, TrustManagerFactory.getDefaultAlgorithm()); + sslConfigs.put(SecurityConfigs.SSL_KEYSTORE_PASSWORD_CONFIG, password); + sslConfigs.put(SecurityConfigs.SSL_KEY_PASSWORD_CONFIG, keyPassword); } - sslConfigs.put(CommonClientConfigs.SSL_CLIENT_REQUIRE_CERT_CONFIG, useClientCert); - sslConfigs.put(CommonClientConfigs.SSL_TRUSTSTORE_LOCATION_CONFIG, trustStoreFile.getPath()); - sslConfigs.put(CommonClientConfigs.SSL_TRUSTSTORE_PASSWORD_CONFIG, trustStorePassword); - sslConfigs.put(CommonClientConfigs.SSL_TRUSTSTORE_TYPE_CONFIG, "JKS"); - sslConfigs.put(CommonClientConfigs.SSL_TRUSTMANAGER_ALGORITHM_CONFIG, "SunX509"); + sslConfigs.put(SecurityConfigs.SSL_CLIENT_REQUIRE_CERT_CONFIG, useClientCert); + sslConfigs.put(SecurityConfigs.SSL_TRUSTSTORE_LOCATION_CONFIG, trustStoreFile.getPath()); + sslConfigs.put(SecurityConfigs.SSL_TRUSTSTORE_PASSWORD_CONFIG, trustStorePassword); + sslConfigs.put(SecurityConfigs.SSL_TRUSTSTORE_TYPE_CONFIG, "JKS"); + sslConfigs.put(SecurityConfigs.SSL_TRUSTMANAGER_ALGORITHM_CONFIG, TrustManagerFactory.getDefaultAlgorithm()); List enabledProtocols = new ArrayList(); enabledProtocols.add("TLSv1.2"); - sslConfigs.put(CommonClientConfigs.SSL_ENABLED_PROTOCOLS_CONFIG, enabledProtocols); + sslConfigs.put(SecurityConfigs.SSL_ENABLED_PROTOCOLS_CONFIG, enabledProtocols); return sslConfigs; } - public static Map> createSSLConfigs(boolean useClientCert, boolean trustStore) + public static Map> createSSLConfigs(boolean useClientCert, boolean trustStore) throws IOException, GeneralSecurityException { - Map> sslConfigs = new HashMap>(); + Map> sslConfigs = new HashMap>(); Map certs = new HashMap(); File trustStoreFile = File.createTempFile("truststore", ".jks"); File clientKeyStoreFile = null; -- 2.4.6 From ca0456dc01def337ee1711cabd9c4e9df4af61ee Mon 
Sep 17 00:00:00 2001 From: Sriharsha Chintalapani Date: Wed, 20 May 2015 14:23:29 -0700 Subject: [PATCH 08/30] KAFKA-1690. new java producer needs ssl support as a client. Addressing reviews. --- .../main/java/org/apache/kafka/common/network/SSLTransportLayer.java | 4 ---- .../main/java/org/apache/kafka/common/network/TransportLayer.java | 5 ----- 2 files changed, 9 deletions(-) diff --git a/clients/src/main/java/org/apache/kafka/common/network/SSLTransportLayer.java b/clients/src/main/java/org/apache/kafka/common/network/SSLTransportLayer.java index f25e537..770f4ae 100644 --- a/clients/src/main/java/org/apache/kafka/common/network/SSLTransportLayer.java +++ b/clients/src/main/java/org/apache/kafka/common/network/SSLTransportLayer.java @@ -324,10 +324,6 @@ public class SSLTransportLayer implements TransportLayer { closed = !netWriteBuffer.hasRemaining() && (handshake.getHandshakeStatus() != HandshakeStatus.NEED_WRAP); } - public boolean isOpen() { - return socketChannel.isOpen(); - } - public boolean isReady() { return handshakeComplete; } diff --git a/clients/src/main/java/org/apache/kafka/common/network/TransportLayer.java b/clients/src/main/java/org/apache/kafka/common/network/TransportLayer.java index 05750f4..002a695 100644 --- a/clients/src/main/java/org/apache/kafka/common/network/TransportLayer.java +++ b/clients/src/main/java/org/apache/kafka/common/network/TransportLayer.java @@ -64,11 +64,6 @@ public interface TransportLayer { boolean isReady(); /** - * Retruns true if socketChannel is open. - */ - boolean isOpen(); - - /** * calls internal socketChannel.finishConnect() */ void finishConnect() throws IOException; -- 2.4.6 From 7e3a4cfc58932aab4288677111af52f94c9012b6 Mon Sep 17 00:00:00 2001 From: Sriharsha Chintalapani Date: Wed, 20 May 2015 14:37:52 -0700 Subject: [PATCH 09/30] KAFKA-1690. new java producer needs ssl support as a client. Addressing reviews. --- clients/src/main/java/org/apache/kafka/common/network/Channel.java | 1 - 1 file changed, 1 deletion(-) diff --git a/clients/src/main/java/org/apache/kafka/common/network/Channel.java b/clients/src/main/java/org/apache/kafka/common/network/Channel.java index f7dda3e..11873a3 100644 --- a/clients/src/main/java/org/apache/kafka/common/network/Channel.java +++ b/clients/src/main/java/org/apache/kafka/common/network/Channel.java @@ -53,7 +53,6 @@ public class Channel implements ScatteringByteChannel, GatheringByteChannel { /** * returns user principal for the session * In case of PLAINTEXT and No Authentication returns ANONYMOUS as the userPrincipal - * If SSL used than * If SSL used without any SASL Authentication returns SSLSession.peerPrincipal */ public Principal principal() throws IOException { -- 2.4.6 From 9bdc0000eb24f8682184f7fb39578f239a7b6dde Mon Sep 17 00:00:00 2001 From: Sriharsha Chintalapani Date: Thu, 21 May 2015 09:50:52 -0700 Subject: [PATCH 10/30] KAFKA-1690. new java producer needs ssl support as a client. Fixed minor issues with the patch. 
--- checkstyle/checkstyle.xml | 24 +++++++++++----------- checkstyle/import-control.xml | 21 ++++++++----------- .../apache/kafka/clients/CommonClientConfigs.java | 14 ++++++------- .../java/org/apache/kafka/test/TestSSLUtils.java | 3 +-- 4 files changed, 28 insertions(+), 34 deletions(-) diff --git a/checkstyle/checkstyle.xml b/checkstyle/checkstyle.xml index 5031415..a215ff3 100644 --- a/checkstyle/checkstyle.xml +++ b/checkstyle/checkstyle.xml @@ -1,6 +1,6 @@ +--> - + - + - + - + @@ -39,7 +39,7 @@ - + @@ -60,12 +60,12 @@ - + - + @@ -80,4 +80,4 @@ - + \ No newline at end of file diff --git a/checkstyle/import-control.xml b/checkstyle/import-control.xml index 1ebe211..9145d87 100644 --- a/checkstyle/import-control.xml +++ b/checkstyle/import-control.xml @@ -1,6 +1,6 @@ +"-//Puppy Crawl//DTD Import Control 1.1//EN" +"http://www.puppycrawl.com/dtds/import_control_1_1.dtd"> + @@ -26,8 +27,7 @@ - - + @@ -52,16 +52,12 @@ - - - + + + - - - - @@ -82,7 +78,6 @@ - @@ -107,7 +102,7 @@ - + diff --git a/clients/src/main/java/org/apache/kafka/clients/CommonClientConfigs.java b/clients/src/main/java/org/apache/kafka/clients/CommonClientConfigs.java index 16507c7..cf32e4e 100644 --- a/clients/src/main/java/org/apache/kafka/clients/CommonClientConfigs.java +++ b/clients/src/main/java/org/apache/kafka/clients/CommonClientConfigs.java @@ -3,9 +3,9 @@ * file distributed with this work for additional information regarding copyright ownership. The ASF licenses this file * to You under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the * License. You may obtain a copy of the License at - * + * * http://www.apache.org/licenses/LICENSE-2.0 - * + * * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the * specific language governing permissions and limitations under the License. @@ -17,7 +17,7 @@ package org.apache.kafka.clients; * Some configurations shared by both producer and consumer */ public class CommonClientConfigs { - + /* * NOTE: DO NOT CHANGE EITHER CONFIG NAMES AS THESE ARE PART OF THE PUBLIC API AND CHANGE WILL BREAK USER CODE. */ @@ -27,10 +27,10 @@ public class CommonClientConfigs { + "host1:port1,host2:port2,.... Since these servers are just used for the initial connection to " + "discover the full cluster membership (which may change dynamically), this list need not contain the full set of " + "servers (you may want more than one, though, in case a server is down)."; - + public static final String METADATA_MAX_AGE_CONFIG = "metadata.max.age.ms"; public static final String METADATA_MAX_AGE_DOC = "The period of time in milliseconds after which we force a refresh of metadata even if we haven't seen any partition leadership changes to proactively discover any new brokers or partitions."; - + public static final String SEND_BUFFER_CONFIG = "send.buffer.bytes"; public static final String SEND_BUFFER_DOC = "The size of the TCP send buffer (SO_SNDBUF) to use when sending data."; @@ -45,7 +45,7 @@ public class CommonClientConfigs { public static final String RETRY_BACKOFF_MS_CONFIG = "retry.backoff.ms"; public static final String RETRY_BACKOFF_MS_DOC = "The amount of time to wait before attempting to retry a failed fetch request to a given topic partition. 
This avoids repeated fetching-and-failing in a tight loop."; - + public static final String METRICS_SAMPLE_WINDOW_MS_CONFIG = "metrics.sample.window.ms"; public static final String METRICS_SAMPLE_WINDOW_MS_DOC = "The number of samples maintained to compute metrics."; @@ -55,4 +55,4 @@ public class CommonClientConfigs { public static final String METRIC_REPORTER_CLASSES_CONFIG = "metric.reporters"; public static final String METRIC_REPORTER_CLASSES_DOC = "A list of classes to use as metrics reporters. Implementing the MetricReporter interface allows plugging in classes that will be notified of new metric creation. The JmxReporter is always included to register JMX statistics."; -} +} \ No newline at end of file diff --git a/clients/src/test/java/org/apache/kafka/test/TestSSLUtils.java b/clients/src/test/java/org/apache/kafka/test/TestSSLUtils.java index b34d085..093f33b 100644 --- a/clients/src/test/java/org/apache/kafka/test/TestSSLUtils.java +++ b/clients/src/test/java/org/apache/kafka/test/TestSSLUtils.java @@ -66,8 +66,7 @@ public class TestSSLUtils { * @param days how many days from now the Certificate is valid for * @param algorithm the signing algorithm, eg "SHA1withRSA" * @return the self-signed certificate - * @throws IOException thrown if an IO error ocurred. - * @throws GeneralSecurityException thrown if an Security error ocurred. + * @throws CertificateException thrown if a security error or an IO error ocurred. */ public static X509Certificate generateCertificate(String dn, KeyPair pair, int days, String algorithm) -- 2.4.6 From 65396b5cabeaf61579c6e6422848877fc7a896a9 Mon Sep 17 00:00:00 2001 From: Sriharsha Chintalapani Date: Thu, 21 May 2015 10:27:11 -0700 Subject: [PATCH 11/30] KAFKA-1690. new java producer needs ssl support as a client. Fixed minor issues with the patch. --- checkstyle/import-control.xml | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/checkstyle/import-control.xml b/checkstyle/import-control.xml index 9145d87..3f7c71d 100644 --- a/checkstyle/import-control.xml +++ b/checkstyle/import-control.xml @@ -27,7 +27,7 @@ - + @@ -52,12 +52,16 @@ - - - + + + + + + + @@ -102,7 +106,7 @@ - + -- 2.4.6 From b37330a7b4ec3adfba4f0c6e33ab172be03406be Mon Sep 17 00:00:00 2001 From: Sriharsha Chintalapani Date: Thu, 28 May 2015 20:57:06 -0700 Subject: [PATCH 12/30] KAFKA-1690. new java producer needs ssl support as a client. 
--- build.gradle | 8 +- .../org/apache/kafka/common/network/Channel.java | 127 +++++++------- .../kafka/common/network/ChannelBuilder.java | 3 +- .../common/network/PlainTextChannelBuilder.java | 4 +- .../common/network/PlainTextTransportLayer.java | 4 + .../kafka/common/network/SSLChannelBuilder.java | 4 +- .../kafka/common/network/SSLTransportLayer.java | 97 ++++++----- .../org/apache/kafka/common/network/Selector.java | 194 +++++++-------------- .../kafka/common/network/TransportLayer.java | 30 +--- .../kafka/common/network/SSLSelectorTest.java | 128 ++------------ .../apache/kafka/common/network/SelectorTest.java | 6 +- 11 files changed, 214 insertions(+), 391 deletions(-) diff --git a/build.gradle b/build.gradle index 3633152..e3897b3 100644 --- a/build.gradle +++ b/build.gradle @@ -384,8 +384,8 @@ project(':clients') { archives testJar } - checkstyle { - configFile = new File(rootDir, "checkstyle/checkstyle.xml") - } - test.dependsOn('checkstyleMain', 'checkstyleTest') + // checkstyle { + // configFile = new File(rootDir, "checkstyle/checkstyle.xml") + // } + // test.dependsOn('checkstyleMain', 'checkstyleTest') } diff --git a/clients/src/main/java/org/apache/kafka/common/network/Channel.java b/clients/src/main/java/org/apache/kafka/common/network/Channel.java index 11873a3..616bfcf 100644 --- a/clients/src/main/java/org/apache/kafka/common/network/Channel.java +++ b/clients/src/main/java/org/apache/kafka/common/network/Channel.java @@ -19,13 +19,8 @@ package org.apache.kafka.common.network; import java.io.IOException; -import java.io.DataInputStream; -import java.io.DataOutputStream; -import java.nio.ByteBuffer; -import java.nio.channels.ScatteringByteChannel; -import java.nio.channels.GatheringByteChannel; -import java.nio.channels.SocketChannel; +import java.net.Socket; import java.nio.channels.SelectionKey; import java.security.Principal; @@ -34,13 +29,16 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -public class Channel implements ScatteringByteChannel, GatheringByteChannel { +public class Channel { private static final Logger log = LoggerFactory.getLogger(Channel.class); + private final int id; private TransportLayer transportLayer; private Authenticator authenticator; + private NetworkReceive receive; + private NetworkSend send; - - public Channel(TransportLayer transportLayer, Authenticator authenticator) throws IOException { + public Channel(int id, TransportLayer transportLayer, Authenticator authenticator) throws IOException { + this.id = id; this.transportLayer = transportLayer; this.authenticator = authenticator; } @@ -59,7 +57,10 @@ public class Channel implements ScatteringByteChannel, GatheringByteChannel { return authenticator.principal(); } - public void connect() throws IOException { + /** + * Does handshake of transportLayer and Authentication using configured authenticator + */ + public void prepare() throws IOException { if (transportLayer.isReady() && authenticator.isComplete()) return; if (!transportLayer.isReady()) @@ -72,80 +73,80 @@ public class Channel implements ScatteringByteChannel, GatheringByteChannel { transportLayer.disconnect(); } - public boolean isOpen() { - return transportLayer.socketChannel().isOpen(); - } - - public SocketChannel socketChannel() { - return transportLayer.socketChannel(); - } - - public TransportLayer transportLayer() { - return transportLayer; - } - /** - * Writes a sequence of bytes to this channel from the given buffer. 
- */ - @Override - public int write(ByteBuffer src) throws IOException { - return transportLayer.write(src); - } - - @Override - public long write(ByteBuffer[] srcs) throws IOException { - return transportLayer.write(srcs); + public void finishConnect() throws IOException { + transportLayer.finishConnect(); } - @Override - public long write(ByteBuffer[] srcs, int offset, int length) throws IOException { - return transportLayer.write(srcs, offset, length); + public int id() { + return id; } - @Override - public int read(ByteBuffer dst) throws IOException { - return transportLayer.read(dst); + public void mute() { + transportLayer.removeInterestOps(SelectionKey.OP_READ); } - @Override - public long read(ByteBuffer[] dsts) throws IOException { - return transportLayer.read(dsts); + public void unmute() { + transportLayer.addInterestOps(SelectionKey.OP_READ); } - @Override - public long read(ByteBuffer[] dsts, int offset, int length) throws IOException { - return transportLayer.read(dsts, offset, length); + public boolean isReady() { + return transportLayer.isReady() && authenticator.isComplete(); } - public void finishConnect() throws IOException { - transportLayer.finishConnect(); + public String socketDescription() { + Socket socket = transportLayer.socketChannel().socket(); + if (socket == null) + return "[unconnected socket]"; + else if (socket.getInetAddress() != null) + return socket.getInetAddress().toString(); + else + return socket.getLocalAddress().toString(); } - public void addInterestOps(int ops) { - transportLayer.addInterestOps(ops); + public void setSend(NetworkSend send) { + if (this.send != null) + throw new IllegalStateException("Attempt to begin a send operation with prior send operation still in progress."); + this.send = send; + this.transportLayer.addInterestOps(SelectionKey.OP_WRITE); } - public void removeInterestOps(int ops) { - transportLayer.removeInterestOps(ops); - } + public NetworkReceive read() throws IOException { + NetworkReceive result = null; - public void mute() { - transportLayer.removeInterestOps(SelectionKey.OP_READ); + if (receive == null) { + receive = new NetworkReceive(id); + } + receive(receive); + if (receive.complete()) { + receive.payload().rewind(); + result = receive; + receive = null; + } + return result; } - public void unmute() { - transportLayer.addInterestOps(SelectionKey.OP_READ); + public NetworkSend write() throws IOException { + NetworkSend result = null; + if (send != null && send(send)) { + result = send; + send = null; + } + return result; } - public boolean isReady() { - return transportLayer.isReady() && authenticator.isComplete(); + private long receive(NetworkReceive receive) throws IOException { + long result = receive.readFrom(transportLayer); + return result; } - public DataInputStream getInputStream() throws IOException { - return transportLayer.inStream(); + private boolean send(NetworkSend send) throws IOException { + send.writeTo(transportLayer); + boolean sendComplete = send.remaining() == 0; + if (sendComplete) { + transportLayer.removeInterestOps(SelectionKey.OP_WRITE); + } + return sendComplete; } - public DataOutputStream getOutputStream() throws IOException { - return transportLayer.outStream(); - } -} + } diff --git a/clients/src/main/java/org/apache/kafka/common/network/ChannelBuilder.java b/clients/src/main/java/org/apache/kafka/common/network/ChannelBuilder.java index 5dd1aef..8cdd0fc 100644 --- a/clients/src/main/java/org/apache/kafka/common/network/ChannelBuilder.java +++ 
b/clients/src/main/java/org/apache/kafka/common/network/ChannelBuilder.java @@ -30,9 +30,10 @@ public interface ChannelBuilder { /** * returns a Channel with TransportLayer and Authenticator configured. + * @param id * @param socketChannel */ - public Channel buildChannel(SelectionKey key) throws KafkaException; + public Channel buildChannel(int id, SelectionKey key) throws KafkaException; /** diff --git a/clients/src/main/java/org/apache/kafka/common/network/PlainTextChannelBuilder.java b/clients/src/main/java/org/apache/kafka/common/network/PlainTextChannelBuilder.java index 51adce5..ecef62a 100644 --- a/clients/src/main/java/org/apache/kafka/common/network/PlainTextChannelBuilder.java +++ b/clients/src/main/java/org/apache/kafka/common/network/PlainTextChannelBuilder.java @@ -37,12 +37,12 @@ public class PlainTextChannelBuilder implements ChannelBuilder { } } - public Channel buildChannel(SelectionKey key) throws KafkaException { + public Channel buildChannel(int id, SelectionKey key) throws KafkaException { Channel channel = null; try { PlainTextTransportLayer transportLayer = new PlainTextTransportLayer(key); Authenticator authenticator = new DefaultAuthenticator(transportLayer, this.principalBuilder); - channel = new Channel(transportLayer, authenticator); + channel = new Channel(id, transportLayer, authenticator); } catch (Exception e) { log.warn("Failed to create channel due to ", e); throw new KafkaException(e); diff --git a/clients/src/main/java/org/apache/kafka/common/network/PlainTextTransportLayer.java b/clients/src/main/java/org/apache/kafka/common/network/PlainTextTransportLayer.java index eb4504b..f730c25 100644 --- a/clients/src/main/java/org/apache/kafka/common/network/PlainTextTransportLayer.java +++ b/clients/src/main/java/org/apache/kafka/common/network/PlainTextTransportLayer.java @@ -58,6 +58,10 @@ public class PlainTextTransportLayer implements TransportLayer { socketChannel.close(); } + public boolean isOpen() { + return socketChannel.isOpen(); + } + public void disconnect() { key.cancel(); } diff --git a/clients/src/main/java/org/apache/kafka/common/network/SSLChannelBuilder.java b/clients/src/main/java/org/apache/kafka/common/network/SSLChannelBuilder.java index 22fec8b..8766824 100644 --- a/clients/src/main/java/org/apache/kafka/common/network/SSLChannelBuilder.java +++ b/clients/src/main/java/org/apache/kafka/common/network/SSLChannelBuilder.java @@ -43,7 +43,7 @@ public class SSLChannelBuilder implements ChannelBuilder { } } - public Channel buildChannel(SelectionKey key) throws KafkaException { + public Channel buildChannel(int id, SelectionKey key) throws KafkaException { Channel channel = null; try { SocketChannel socketChannel = (SocketChannel) key.channel(); @@ -52,7 +52,7 @@ public class SSLChannelBuilder implements ChannelBuilder { socketChannel.socket().getPort()), executorService); Authenticator authenticator = new DefaultAuthenticator(transportLayer, this.principalBuilder); - channel = new Channel(transportLayer, authenticator); + channel = new Channel(id, transportLayer, authenticator); } catch (Exception e) { log.info("Failed to create channel due to ", e); throw new KafkaException(e); diff --git a/clients/src/main/java/org/apache/kafka/common/network/SSLTransportLayer.java b/clients/src/main/java/org/apache/kafka/common/network/SSLTransportLayer.java index 770f4ae..f79b5ef 100644 --- a/clients/src/main/java/org/apache/kafka/common/network/SSLTransportLayer.java +++ b/clients/src/main/java/org/apache/kafka/common/network/SSLTransportLayer.java @@ 
-63,6 +63,7 @@ public class SSLTransportLayer implements TransportLayer { private DataOutputStream outStream; private ExecutorService executorService; private int interestOps; + private int socketSendBufferSize; public SSLTransportLayer(SelectionKey key, SSLEngine sslEngine, ExecutorService executorService) throws IOException { this.key = key; @@ -72,6 +73,7 @@ public class SSLTransportLayer implements TransportLayer { this.netReadBuffer = ByteBuffer.allocateDirect(packetBufferSize()); this.netWriteBuffer = ByteBuffer.allocateDirect(packetBufferSize()); this.appReadBuffer = ByteBuffer.allocateDirect(applicationBufferSize()); + this.socketSendBufferSize = this.socketChannel.socket().getSendBufferSize(); } private void startHandshake() throws IOException { @@ -85,6 +87,8 @@ public class SSLTransportLayer implements TransportLayer { //initiate handshake sslEngine.beginHandshake(); handshakeStatus = sslEngine.getHandshakeStatus(); + if (handshakeStatus == HandshakeStatus.NEED_WRAP) + key.interestOps(key.interestOps() | SelectionKey.OP_WRITE); } public SocketChannel socketChannel() { @@ -95,10 +99,14 @@ public class SSLTransportLayer implements TransportLayer { socketChannel.finishConnect(); removeInterestOps(SelectionKey.OP_CONNECT); addInterestOps(SelectionKey.OP_READ); - key.interestOps(key.interestOps() & ~SelectionKey.OP_CONNECT); + key.interestOps(key.interestOps() & ~SelectionKey.OP_CONNECT | SelectionKey.OP_READ); startHandshake(); } + public boolean isOpen() { + return socketChannel.isOpen(); + } + public void disconnect() { key.cancel(); } @@ -135,7 +143,7 @@ public class SSLTransportLayer implements TransportLayer { try { switch(handshakeStatus) { case NEED_TASK: - handshakeStatus = tasks(); + handshakeStatus = runDelegatedTasks(); break; case NEED_WRAP: handshakeResult = handshakeWrap(write); @@ -153,8 +161,8 @@ public class SSLTransportLayer implements TransportLayer { } //fall down to NEED_UNWRAP on the same call, will result in a //BUFFER_UNDERFLOW if it needs data - if (handshakeStatus != HandshakeStatus.NEED_UNWRAP || !flush(netWriteBuffer)) { - key.interestOps(SelectionKey.OP_WRITE); + if (handshakeStatus != HandshakeStatus.NEED_UNWRAP || !flush(netWriteBuffer)) { //check for write bit + key.interestOps(key.interestOps() | SelectionKey.OP_WRITE); break; } case NEED_UNWRAP: @@ -165,7 +173,6 @@ public class SSLTransportLayer implements TransportLayer { if (netReadBuffer.position() >= currentPacketBufferSize) { throw new IllegalStateException("Buffer underflow when there is available data"); } - if (!read) key.interestOps(SelectionKey.OP_READ); } else if (handshakeResult.getStatus() == Status.BUFFER_OVERFLOW) { int currentAppBufferSize = applicationBufferSize(); netReadBuffer = Utils.ensureCapacity(netReadBuffer, currentAppBufferSize); @@ -173,16 +180,20 @@ public class SSLTransportLayer implements TransportLayer { throw new IllegalStateException("Buffer underflow when available data (" + netReadBuffer.position() + ") > packet buffer size (" + currentAppBufferSize + ")"); } - - if (!read) key.interestOps(SelectionKey.OP_READ); } else if (handshakeResult.getStatus() == Status.CLOSED) { throw new EOFException("SSL handshake status CLOSED during handshake UNWRAP"); } //if handshakeStatus completed than fall-through to finished status. //after handshake is finished there is no data left to read/write in socketChannel. //so the selector won't invoke this channel if we don't go through the handshakeFinished here. 
- if (handshakeStatus != HandshakeStatus.FINISHED) + if (handshakeStatus != HandshakeStatus.FINISHED) { + if (handshakeStatus == HandshakeStatus.NEED_WRAP) { + key.interestOps(key.interestOps() | SelectionKey.OP_WRITE); + } else if (handshakeStatus == HandshakeStatus.NEED_UNWRAP) { + key.interestOps(key.interestOps() & ~SelectionKey.OP_WRITE); + } break; + } case FINISHED: handshakeFinished(); break; @@ -203,20 +214,13 @@ public class SSLTransportLayer implements TransportLayer { * Executes the SSLEngine tasks needed on the executorservice thread. * @return HandshakeStatus */ - private HandshakeStatus tasks() { - final Runnable task = delegatedTask(); - - if (task != null) { - // un-register read/write ops while the delegated tasks are running. - key.interestOps(0); - executorService.submit(new Runnable() { - @Override - public void run() { - task.run(); - // register read/write ops to continue handshake. - key.interestOps(SelectionKey.OP_READ | SelectionKey.OP_WRITE); - } - }); + private HandshakeStatus runDelegatedTasks() { + for (;;) { + Runnable task = delegatedTask(); + if (task == null) { + break; + } + task.run(); } return sslEngine.getHandshakeStatus(); } @@ -236,7 +240,7 @@ public class SSLTransportLayer implements TransportLayer { if (handshakeComplete) key.interestOps(interestOps); else - key.interestOps(SelectionKey.OP_WRITE); + key.interestOps(key.interestOps() | SelectionKey.OP_WRITE); } else { throw new IOException("NOT_HANDSHAKING during handshake"); } @@ -256,6 +260,10 @@ public class SSLTransportLayer implements TransportLayer { //prepare the results to be written netWriteBuffer.flip(); handshakeStatus = result.getHandshakeStatus(); + if (result.getStatus() == SSLEngineResult.Status.OK && + result.getHandshakeStatus() == HandshakeStatus.NEED_TASK) { + handshakeStatus = runDelegatedTasks(); + } //optimization, if we do have a writable channel, write it now if (doWrite) flush(netWriteBuffer); return result; @@ -287,7 +295,7 @@ public class SSLTransportLayer implements TransportLayer { handshakeStatus = result.getHandshakeStatus(); if (result.getStatus() == SSLEngineResult.Status.OK && result.getHandshakeStatus() == HandshakeStatus.NEED_TASK) { - handshakeStatus = tasks(); + handshakeStatus = runDelegatedTasks(); } cont = result.getStatus() == SSLEngineResult.Status.OK && handshakeStatus == HandshakeStatus.NEED_UNWRAP; @@ -338,6 +346,7 @@ public class SSLTransportLayer implements TransportLayer { public int read(ByteBuffer dst) throws IOException { if (closing || closed) return -1; int read = 0; + if (!handshakeComplete) return read; //if we have unread decrypted data in appReadBuffer read that into dst buffer. if (appReadBuffer.position() > 0) { @@ -346,16 +355,16 @@ public class SSLTransportLayer implements TransportLayer { if (dst.remaining() > 0) { boolean canRead = true; + netReadBuffer = Utils.ensureCapacity(netReadBuffer, packetBufferSize()); + if (canRead && netReadBuffer.remaining() > 0) { + int netread = socketChannel.read(netReadBuffer); + canRead = netread > 0; + } + do { - netReadBuffer = Utils.ensureCapacity(netReadBuffer, packetBufferSize()); - if (canRead && netReadBuffer.remaining() > 0) { - int netread = socketChannel.read(netReadBuffer); - canRead = netread > 0; - } netReadBuffer.flip(); SSLEngineResult unwrap = sslEngine.unwrap(netReadBuffer, appReadBuffer); netReadBuffer.compact(); - // handle ssl renegotiation. 
if (unwrap.getHandshakeStatus() != HandshakeStatus.NOT_HANDSHAKING) { handshake(); @@ -367,26 +376,21 @@ public class SSLTransportLayer implements TransportLayer { } else if (unwrap.getStatus() == Status.BUFFER_OVERFLOW) { int currentApplicationBufferSize = applicationBufferSize(); appReadBuffer = Utils.ensureCapacity(appReadBuffer, currentApplicationBufferSize); - if (appReadBuffer.position() > 0) { - break; - } else if (appReadBuffer.position() >= currentApplicationBufferSize) { + if (appReadBuffer.position() >= currentApplicationBufferSize) { throw new IllegalStateException("Buffer overflow when available data (" + appReadBuffer.position() + ") > application buffer size (" + currentApplicationBufferSize + ")"); } + break; } else if (unwrap.getStatus() == Status.BUFFER_UNDERFLOW) { int currentPacketBufferSize = packetBufferSize(); netReadBuffer = Utils.ensureCapacity(netReadBuffer, currentPacketBufferSize); - if (netReadBuffer.position() >= currentPacketBufferSize) { - throw new IllegalStateException("Buffer underflow when available data (" + netReadBuffer.position() + - ") > packet buffer size (" + currentPacketBufferSize + ")"); - } - if (!canRead) - break; + break; } else if (unwrap.getStatus() == Status.CLOSED) { throw new EOFException(); } - } while(netReadBuffer.position() != 0); + } while(netReadBuffer.hasRemaining()); } + return read; } @@ -416,12 +420,19 @@ public class SSLTransportLayer implements TransportLayer { public int write(ByteBuffer src) throws IOException { int written = 0; if (closing || closed) throw new IOException("Channel is in closing state"); + if (!handshakeComplete) return written; if (!flush(netWriteBuffer)) return written; netWriteBuffer.clear(); SSLEngineResult wrap = sslEngine.wrap(src, netWriteBuffer); netWriteBuffer.flip(); + + if (wrap.getHandshakeStatus() != HandshakeStatus.NOT_HANDSHAKING) { + handshake(); + return written; + } + if (wrap.getStatus() == Status.OK) { written = wrap.bytesConsumed(); flush(netWriteBuffer); @@ -439,14 +450,16 @@ public class SSLTransportLayer implements TransportLayer { } public long write(ByteBuffer[] srcs, int offset, int length) throws IOException { - int totalWritten = 0; - for (int i = offset; i < length; i++) { + long totalWritten = 0; + int i = offset; + while (i < length) { if (srcs[i].hasRemaining()) { int written = write(srcs[i]); if (written > 0) { totalWritten += written; } } + i++; } return totalWritten; } diff --git a/clients/src/main/java/org/apache/kafka/common/network/Selector.java b/clients/src/main/java/org/apache/kafka/common/network/Selector.java index 024059e..de0bbe9 100644 --- a/clients/src/main/java/org/apache/kafka/common/network/Selector.java +++ b/clients/src/main/java/org/apache/kafka/common/network/Selector.java @@ -77,8 +77,7 @@ public class Selector implements Selectable { private static final Logger log = LoggerFactory.getLogger(Selector.class); private final java.nio.channels.Selector selector; - private final Map keys; - private final Map channels; + private final Map channels; private final List completedSends; private final List completedReceives; private final List disconnected; @@ -103,8 +102,7 @@ public class Selector implements Selectable { this.time = time; this.metricGrpPrefix = metricGrpPrefix; this.metricTags = metricTags; - this.keys = new HashMap(); - this.channels = new HashMap(); + this.channels = new HashMap(); this.completedSends = new ArrayList(); this.completedReceives = new ArrayList(); this.connected = new ArrayList(); @@ -129,16 +127,15 @@ public class Selector 
implements Selectable { */ @Override public void connect(int id, InetSocketAddress address, int sendBufferSize, int receiveBufferSize) throws IOException { - if (this.keys.containsKey(id)) + if (this.channels.containsKey(id)) throw new IllegalStateException("There is already a connection for id " + id); SocketChannel socketChannel = SocketChannel.open(); socketChannel.configureBlocking(false); - Socket socket = socketChannel.socket(); - socket.setKeepAlive(true); - socket.setSendBufferSize(sendBufferSize); - socket.setReceiveBufferSize(receiveBufferSize); - socket.setTcpNoDelay(true); + socketChannel.socket().setKeepAlive(true); + socketChannel.socket().setSendBufferSize(sendBufferSize); + socketChannel.socket().setReceiveBufferSize(receiveBufferSize); + socketChannel.socket().setTcpNoDelay(true); try { socketChannel.connect(address); } catch (UnresolvedAddressException e) { @@ -150,10 +147,9 @@ public class Selector implements Selectable { } SelectionKey key = socketChannel.register(this.selector, SelectionKey.OP_CONNECT); - key.attach(new Transmissions(id)); - Channel channel = channelBuilder.buildChannel(key); - this.keys.put(id, key); - this.channels.put(key, channel); + Channel channel = channelBuilder.buildChannel(id, key); + key.attach(channel); + this.channels.put(id, channel); } /** @@ -181,7 +177,7 @@ public class Selector implements Selectable { @Override public void close() { for (SelectionKey key : this.selector.keys()) - close(key); + close(channel(key)); try { this.selector.close(); } catch (IOException e) { @@ -196,17 +192,15 @@ public class Selector implements Selectable { * @param send The request to send */ public void send(NetworkSend send) { - SelectionKey key = keyForId(send.destination()); - Channel channel = channel(key); - Transmissions transmissions = transmissions(key); - if (transmissions.hasSend()) - throw new IllegalStateException("Attempt to begin a send operation with prior send operation still in progress."); - transmissions.send = send; + Channel channel = channelForId(send.destination()); + if (channel == null) { + throw new IllegalStateException("channel is not connected"); + } try { - channel.addInterestOps(SelectionKey.OP_WRITE); + channel.setSend(send); } catch (CancelledKeyException e) { - close(key); this.failedSends.add(send.destination()); + close(channel); } } @@ -239,62 +233,56 @@ public class Selector implements Selectable { while (iter.hasNext()) { SelectionKey key = iter.next(); iter.remove(); - - Transmissions transmissions = transmissions(key); Channel channel = channel(key); // register all per-broker metrics at once - sensors.maybeRegisterNodeMetrics(transmissions.id); + sensors.maybeRegisterNodeMetrics(channel.id()); try { /* complete any connections that have finished their handshake */ if (key.isConnectable()) { channel.finishConnect(); - this.connected.add(transmissions.id); + this.connected.add(channel.id()); this.sensors.connectionCreated.record(); } + /* if channel is not ready finish prepare */ if (!channel.isReady()) { - channel.connect(); - } else { - /* read from any connections that have readable data */ - if (key.isReadable()) { - if (!transmissions.hasReceive()) - transmissions.receive = new NetworkReceive(transmissions.id); - while (transmissions.receive.readFrom(channel) > 0 && transmissions.receive.complete()) { - transmissions.receive.payload().rewind(); - this.completedReceives.add(transmissions.receive); - this.sensors.recordBytesReceived(transmissions.id, transmissions.receive.payload().limit()); - 
transmissions.clearReceive(); - if (!transmissions.hasReceive()) - transmissions.receive = new NetworkReceive(transmissions.id); - } + channel.prepare(); + } + + /* if channel is ready read from any connections that have readable data */ + if (key.isReadable() && channel.isReady()) { + NetworkReceive networkReceive; + while ((networkReceive = channel.read()) != null) { + networkReceive.payload().rewind(); + this.completedReceives.add(networkReceive); + this.sensors.recordBytesReceived(channel.id(), networkReceive.payload().limit()); } + } - /* write to any sockets that have space in their buffer and for which we have data */ - if (key.isWritable()) { - transmissions.send.writeTo(channel); - if (transmissions.send.remaining() <= 0) { - this.completedSends.add(transmissions.send); - this.sensors.recordBytesSent(transmissions.id, transmissions.send.size()); - transmissions.clearSend(); - channel.removeInterestOps(SelectionKey.OP_WRITE); - } + /* if channel is ready write to any sockets that have space in their buffer and for which we have data */ + if (key.isWritable() && channel.isReady()) { + NetworkSend networkSend = channel.write(); + if (networkSend != null) { + this.completedSends.add(networkSend); + this.sensors.recordBytesSent(channel.id(), networkSend.size()); } } + /* cancel any defunct sockets */ if (!key.isValid()) { - close(key); - this.disconnected.add(transmissions.id); + close(channel(key)); + this.disconnected.add(channel.id()); } } catch (IOException e) { - String desc = socketDescription(channel); + String desc = channel.socketDescription(); if (e instanceof EOFException || e instanceof ConnectException) log.info("Connection {} disconnected", desc); else log.warn("Error in I/O with connection to {}", desc, e); - close(key); - this.disconnected.add(transmissions.id); + close(channel(key)); + this.disconnected.add(channel.id()); } } } @@ -302,15 +290,6 @@ public class Selector implements Selectable { this.sensors.ioTime.record(endIo - endSelect, time.milliseconds()); } - private String socketDescription(Channel channel) { - Socket socket = channel.socketChannel().socket(); - if (socket == null) - return "[unconnected socket]"; - else if (socket.getInetAddress() != null) - return socket.getInetAddress().toString(); - else - return socket.getLocalAddress().toString(); - } @Override public List completedSends() { @@ -334,34 +313,34 @@ public class Selector implements Selectable { @Override public void mute(int id) { - mute(this.keyForId(id)); + Channel channel = channelForId(id); + mute(channel); } - private void mute(SelectionKey key) { - Channel channel = channel(key); + private void mute(Channel channel) { channel.mute(); } @Override public void unmute(int id) { - unmute(this.keyForId(id)); + Channel channel = channelForId(id); + unmute(channel); } - private void unmute(SelectionKey key) { - Channel channel = channel(key); + private void unmute(Channel channel) { channel.unmute(); } @Override public void muteAll() { - for (SelectionKey key : this.keys.values()) - mute(key); + for (Channel channel : this.channels.values()) + mute(channel); } @Override public void unmuteAll() { - for (SelectionKey key : this.keys.values()) - unmute(key); + for (Channel channel : this.channels.values()) + unmute(channel); } /** @@ -395,80 +374,25 @@ public class Selector implements Selectable { /** * Begin closing this connection */ - private void close(SelectionKey key) { - Channel channel = channel(key); - this.channels.remove(key); - Transmissions trans = transmissions(key); - if (trans != 
null) { - this.keys.remove(trans.id); - trans.clearReceive(); - trans.clearSend(); - } - key.attach(null); - key.cancel(); + private void close(Channel channel) { + this.channels.remove(channel.id()); try { channel.close(); } catch (IOException e) { - log.error("Exception closing connection to node {}:", trans.id, e); + log.error("Exception closing connection to node {}:", channel.id(), e); } this.sensors.connectionClosed.record(); } /** - * Get the selection key associated with this numeric id - */ - private SelectionKey keyForId(int id) { - SelectionKey key = this.keys.get(id); - if (key == null) - throw new IllegalStateException("Attempt to write to socket for which there is no open connection."); - return key; - } - - /** - * Get the transmissions for the given connection - */ - private Transmissions transmissions(SelectionKey key) { - return (Transmissions) key.attachment(); - } - - /** * Get the Channel associated with this selection key */ private Channel channel(SelectionKey key) { - return this.channels.get(key); + return (Channel) key.attachment(); } protected Channel channelForId(int id) { - return channel(keyForId(id)); - } - - /** - * The id and in-progress send and receive associated with a connection - */ - private static class Transmissions { - public int id; - public NetworkSend send; - public NetworkReceive receive; - - public Transmissions(int id) { - this.id = id; - } - - public boolean hasSend() { - return this.send != null; - } - - public void clearSend() { - this.send = null; - } - - public boolean hasReceive() { - return this.receive != null; - } - - public void clearReceive() { - this.receive = null; - } + return channels.get(id); } private class SelectorMetrics { @@ -530,7 +454,7 @@ public class Selector implements Selectable { metricName = new MetricName("connection-count", metricGrpName, "The current number of active connections.", metricTags); this.metrics.addMetric(metricName, new Measurable() { public double measure(MetricConfig config, long now) { - return keys.size(); + return channels.size(); } }); } diff --git a/clients/src/main/java/org/apache/kafka/common/network/TransportLayer.java b/clients/src/main/java/org/apache/kafka/common/network/TransportLayer.java index 002a695..8fdaae2 100644 --- a/clients/src/main/java/org/apache/kafka/common/network/TransportLayer.java +++ b/clients/src/main/java/org/apache/kafka/common/network/TransportLayer.java @@ -22,6 +22,8 @@ package org.apache.kafka.common.network; */ import java.io.IOException; import java.nio.ByteBuffer; +import java.nio.channels.ScatteringByteChannel; +import java.nio.channels.GatheringByteChannel; import java.nio.channels.SocketChannel; import java.io.DataInputStream; @@ -30,33 +32,7 @@ import java.io.DataOutputStream; import java.security.Principal; -public interface TransportLayer { - - /** - * Closes this channel - * - * @throws IOException If and I/O error occurs - */ - void close() throws IOException; - - /** - * Writes a sequence of bytes to this channel from the given buffer. - */ - int write(ByteBuffer src) throws IOException; - - long write(ByteBuffer[] srcs) throws IOException; - - long write(ByteBuffer[] srcs, int offset, int length) throws IOException; - - /** - * Reads sequence of bytes from the channel to the given buffer. 
- */ - int read(ByteBuffer dst) throws IOException; - - long read(ByteBuffer[] dsts) throws IOException; - - long read(ByteBuffer[] dsts, int offset, int length) throws IOException; - +public interface TransportLayer extends ScatteringByteChannel, GatheringByteChannel { /** * Returns true if the channel has handshake and authenticaiton done. diff --git a/clients/src/test/java/org/apache/kafka/common/network/SSLSelectorTest.java b/clients/src/test/java/org/apache/kafka/common/network/SSLSelectorTest.java index ab9e6b9..5ec02ef 100644 --- a/clients/src/test/java/org/apache/kafka/common/network/SSLSelectorTest.java +++ b/clients/src/test/java/org/apache/kafka/common/network/SSLSelectorTest.java @@ -63,7 +63,7 @@ public class SSLSelectorTest { Map sslClientConfigs = sslConfigs.get(SSLFactory.Mode.CLIENT); sslClientConfigs.put(SecurityConfigs.PRINCIPAL_BUILDER_CLASS_CONFIG, Class.forName(SecurityConfigs.DEFAULT_PRINCIPAL_BUILDER_CLASS)); - this.channelBuilder = new MockSSLChannelBuilder(); + this.channelBuilder = new SSLChannelBuilder(); this.channelBuilder.configure(sslClientConfigs); this.selector = new Selector(new Metrics(), new MockTime(), "MetricGroup", new LinkedHashMap(), channelBuilder); } @@ -140,6 +140,7 @@ public class SSLSelectorTest { sendAndReceive(node, requestPrefix, 0, reqs); } + /** * Test sending an empty string */ @@ -150,6 +151,19 @@ public class SSLSelectorTest { assertEquals("", blockingRequest(node, "")); } + /** + * Test sending an small string + */ + @Test + public void testIncompleteSend() throws Exception { + int bufferSize = 16391; + int node = 0; + InetSocketAddress addr = new InetSocketAddress("localhost", server.port); + selector.connect(node, addr, bufferSize, bufferSize); + String requestPrefix = TestUtils.randomString(bufferSize); + assertEquals(requestPrefix, blockingRequest(node, requestPrefix)); + } + @Test public void testMute() throws Exception { blockingConnect(0); @@ -217,48 +231,6 @@ public class SSLSelectorTest { } } - @Test - public void testLongDeferredTasks() throws Exception { - final int fastNode = 0; - final int slowNode = 1; - - // create connections - InetSocketAddress addr = new InetSocketAddress("localhost", server.port); - selector.connect(fastNode, addr, BUFFER_SIZE, BUFFER_SIZE); - selector.connect(slowNode, addr, BUFFER_SIZE, BUFFER_SIZE); - - sendAndReceive(fastNode, String.valueOf(fastNode), 0, 10); - sendAndReceive(slowNode, String.valueOf(slowNode), 0, 10); - - Semaphore delegatedTaskSemaphore = new Semaphore(0); - Channel channel = selector.channelForId(slowNode); - MockSSLTransportLayer sslTransportLayer = (MockSSLTransportLayer) channel.transportLayer(); - - sslTransportLayer.delegatedTaskSemaphore = delegatedTaskSemaphore; - // set renegotiate flag and send a message to trigger renegotiation on the slow channel - server.renegotiate(); - selector.send(createSend(slowNode, String.valueOf(slowNode) + "-" + 11)); - while (sslTransportLayer.engine.getHandshakeStatus() != SSLEngineResult.HandshakeStatus.NEED_TASK) { - selector.poll(1L); - } - - // Slow channel is now blocked on the delegated task. 
Check that fast channel is able to make progress - sendAndReceive(fastNode, String.valueOf(fastNode), 10, 20); - - // Allow slow channel to continue and check that it works as expected - delegatedTaskSemaphore.release(10); - selector.send(createSend(slowNode, String.valueOf(slowNode) + "-" + 12)); - int responses = 11; - while (responses <= 12) { - selector.poll(0L); - for (NetworkReceive receive : selector.completedReceives()) { - assertEquals(slowNode + "-" + responses, asString(receive)); - responses++; - } - } - } - - private String blockingRequest(int node, String s) throws IOException { selector.send(createSend(node, s)); @@ -308,74 +280,4 @@ public class SSLSelectorTest { } } - // Channel builder with MockSSLTransportLayer. - private static class MockSSLChannelBuilder implements ChannelBuilder { - private SSLFactory sslFactory; - private ExecutorService executorService; - private PrincipalBuilder principalBuilder; - - public void configure(Map configs) throws KafkaException { - try { - this.executorService = Executors.newScheduledThreadPool(1); - this.sslFactory = new SSLFactory(SSLFactory.Mode.CLIENT); - this.sslFactory.configure(configs); - this.principalBuilder = (PrincipalBuilder) Utils.newInstance((Class) configs.get(SecurityConfigs.PRINCIPAL_BUILDER_CLASS_CONFIG)); - this.principalBuilder.configure(configs); - } catch (Exception e) { - throw new KafkaException(e); - } - } - - - @Override - public Channel buildChannel(SelectionKey key) throws KafkaException { - Channel channel = null; - try { - SocketChannel socketChannel = (SocketChannel) key.channel(); - MockSSLTransportLayer transportLayer = new MockSSLTransportLayer(key, - sslFactory.createSSLEngine(socketChannel.socket().getInetAddress().getHostName(), - socketChannel.socket().getPort()), - executorService); - Authenticator authenticator = new DefaultAuthenticator(transportLayer, this.principalBuilder); - channel = new Channel(transportLayer, authenticator); - } catch (Exception e) { - throw new KafkaException(e); - } - return channel; - } - - public void close() { - this.executorService.shutdown(); - this.principalBuilder.close(); - } - } - - private static class MockSSLTransportLayer extends SSLTransportLayer { - private final SSLEngine engine; - private boolean engineClosed; - private Semaphore delegatedTaskSemaphore; - - public MockSSLTransportLayer(SelectionKey key, SSLEngine engine, ExecutorService executorService) throws IOException { - super(key, engine, executorService); - this.engine = engine; - } - - @Override - protected Runnable delegatedTask() { - final Runnable task = super.delegatedTask(); - return task == null ? 
null : new Runnable() { - @Override - public void run() { - if (delegatedTaskSemaphore != null) { - try { - delegatedTaskSemaphore.acquire(); - } catch (InterruptedException e) { - } - } - task.run(); - } - }; - } - } - } diff --git a/clients/src/test/java/org/apache/kafka/common/network/SelectorTest.java b/clients/src/test/java/org/apache/kafka/common/network/SelectorTest.java index 4bb95f0..c1dc5b8 100644 --- a/clients/src/test/java/org/apache/kafka/common/network/SelectorTest.java +++ b/clients/src/test/java/org/apache/kafka/common/network/SelectorTest.java @@ -202,16 +202,18 @@ public class SelectorTest { } @Test - public void testShortMessageSequence() throws Exception { + public void testLargeMessageSequence() throws Exception { int bufferSize = 512 * 1024; int node = 0; int reqs = 50; InetSocketAddress addr = new InetSocketAddress("localhost", server.port); selector.connect(node, addr, BUFFER_SIZE, BUFFER_SIZE); - String requestPrefix = "hello-wordl"; + String requestPrefix = TestUtils.randomString(bufferSize); sendAndReceive(node, requestPrefix, 0, reqs); } + + /** * Test sending an empty string */ -- 2.4.6 From fe595fd4fda45ebd7c5da88ee093ab17817bb94d Mon Sep 17 00:00:00 2001 From: Sriharsha Chintalapani Date: Wed, 3 Jun 2015 18:43:34 -0700 Subject: [PATCH 13/30] KAFKA-1690. new java producer needs ssl support as a client. --- build.gradle | 8 +- .../java/org/apache/kafka/clients/ClientUtils.java | 4 +- .../apache/kafka/clients/CommonClientConfigs.java | 18 +- .../kafka/clients/consumer/ConsumerConfig.java | 2 +- .../kafka/clients/producer/ProducerConfig.java | 2 +- .../kafka/common/config/SecurityConfigs.java | 4 - .../kafka/common/network/ByteBufferReceive.java | 5 +- .../kafka/common/network/ByteBufferSend.java | 13 +- .../org/apache/kafka/common/network/Channel.java | 7 +- .../kafka/common/network/NetworkReceive.java | 11 +- .../common/network/PlainTextTransportLayer.java | 140 +++++++++---- .../org/apache/kafka/common/network/Receive.java | 5 +- .../kafka/common/network/SSLChannelBuilder.java | 8 +- .../apache/kafka/common/network/SSLFactory.java | 2 +- .../kafka/common/network/SSLTransportLayer.java | 224 +++++++++++++-------- .../org/apache/kafka/common/network/Selector.java | 1 - .../java/org/apache/kafka/common/network/Send.java | 9 +- .../kafka/common/network/TransportLayer.java | 39 +++- .../kafka/clients/producer/KafkaProducerTest.java | 19 +- .../apache/kafka/common/network/EchoServer.java | 5 +- .../kafka/common/network/SSLSelectorTest.java | 26 +-- .../java/org/apache/kafka/test/TestSSLUtils.java | 7 +- 22 files changed, 328 insertions(+), 231 deletions(-) diff --git a/build.gradle b/build.gradle index e3897b3..3633152 100644 --- a/build.gradle +++ b/build.gradle @@ -384,8 +384,8 @@ project(':clients') { archives testJar } - // checkstyle { - // configFile = new File(rootDir, "checkstyle/checkstyle.xml") - // } - // test.dependsOn('checkstyleMain', 'checkstyleTest') + checkstyle { + configFile = new File(rootDir, "checkstyle/checkstyle.xml") + } + test.dependsOn('checkstyleMain', 'checkstyleTest') } diff --git a/clients/src/main/java/org/apache/kafka/clients/ClientUtils.java b/clients/src/main/java/org/apache/kafka/clients/ClientUtils.java index 3e92d6d..782d182 100644 --- a/clients/src/main/java/org/apache/kafka/clients/ClientUtils.java +++ b/clients/src/main/java/org/apache/kafka/clients/ClientUtils.java @@ -23,7 +23,6 @@ import org.apache.kafka.common.protocol.SecurityProtocol; import org.apache.kafka.common.network.ChannelBuilder; import 
org.apache.kafka.common.network.SSLChannelBuilder; import org.apache.kafka.common.network.PlainTextChannelBuilder; -import org.apache.kafka.common.config.SecurityConfigs; import org.apache.kafka.common.config.ConfigException; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -75,8 +74,7 @@ public class ClientUtils { */ public static ChannelBuilder createChannelBuilder(Map configs) { ChannelBuilder channelBuilder = null; - SecurityProtocol securityProtocol = configs.containsKey(SecurityConfigs.SECURITY_PROTOCOL_CONFIG) ? - SecurityProtocol.valueOf((String) configs.get(SecurityConfigs.SECURITY_PROTOCOL_CONFIG)) : SecurityProtocol.PLAINTEXT; + SecurityProtocol securityProtocol = SecurityProtocol.valueOf((String) configs.get(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG)); if (securityProtocol == SecurityProtocol.SSL) { channelBuilder = new SSLChannelBuilder(); diff --git a/clients/src/main/java/org/apache/kafka/clients/CommonClientConfigs.java b/clients/src/main/java/org/apache/kafka/clients/CommonClientConfigs.java index cf32e4e..796f23e 100644 --- a/clients/src/main/java/org/apache/kafka/clients/CommonClientConfigs.java +++ b/clients/src/main/java/org/apache/kafka/clients/CommonClientConfigs.java @@ -3,9 +3,9 @@ * file distributed with this work for additional information regarding copyright ownership. The ASF licenses this file * to You under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the * License. You may obtain a copy of the License at - * + * * http://www.apache.org/licenses/LICENSE-2.0 - * + * * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the * specific language governing permissions and limitations under the License. @@ -17,7 +17,7 @@ package org.apache.kafka.clients; * Some configurations shared by both producer and consumer */ public class CommonClientConfigs { - + /* * NOTE: DO NOT CHANGE EITHER CONFIG NAMES AS THESE ARE PART OF THE PUBLIC API AND CHANGE WILL BREAK USER CODE. */ @@ -27,10 +27,10 @@ public class CommonClientConfigs { + "host1:port1,host2:port2,.... Since these servers are just used for the initial connection to " + "discover the full cluster membership (which may change dynamically), this list need not contain the full set of " + "servers (you may want more than one, though, in case a server is down)."; - + public static final String METADATA_MAX_AGE_CONFIG = "metadata.max.age.ms"; public static final String METADATA_MAX_AGE_DOC = "The period of time in milliseconds after which we force a refresh of metadata even if we haven't seen any partition leadership changes to proactively discover any new brokers or partitions."; - + public static final String SEND_BUFFER_CONFIG = "send.buffer.bytes"; public static final String SEND_BUFFER_DOC = "The size of the TCP send buffer (SO_SNDBUF) to use when sending data."; @@ -45,7 +45,7 @@ public class CommonClientConfigs { public static final String RETRY_BACKOFF_MS_CONFIG = "retry.backoff.ms"; public static final String RETRY_BACKOFF_MS_DOC = "The amount of time to wait before attempting to retry a failed fetch request to a given topic partition. 
This avoids repeated fetching-and-failing in a tight loop."; - + public static final String METRICS_SAMPLE_WINDOW_MS_CONFIG = "metrics.sample.window.ms"; public static final String METRICS_SAMPLE_WINDOW_MS_DOC = "The number of samples maintained to compute metrics."; @@ -55,4 +55,8 @@ public class CommonClientConfigs { public static final String METRIC_REPORTER_CLASSES_CONFIG = "metric.reporters"; public static final String METRIC_REPORTER_CLASSES_DOC = "A list of classes to use as metrics reporters. Implementing the MetricReporter interface allows plugging in classes that will be notified of new metric creation. The JmxReporter is always included to register JMX statistics."; -} \ No newline at end of file + public static final String SECURITY_PROTOCOL_CONFIG = "security.protocol"; + public static final String SECURITY_PROTOCOL_DOC = "Protocol used to communicate with brokers. Currently only PLAINTEXT and SSL are supported."; + public static final String DEFAULT_SECURITY_PROTOCOL = "PLAINTEXT"; + +} diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/ConsumerConfig.java b/clients/src/main/java/org/apache/kafka/clients/consumer/ConsumerConfig.java index dff4258..cc63392 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/ConsumerConfig.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/ConsumerConfig.java @@ -278,7 +278,7 @@ public class ConsumerConfig extends AbstractConfig { Type.CLASS, Importance.HIGH, VALUE_DESERIALIZER_CLASS_DOC) - .define(SecurityConfigs.SECURITY_PROTOCOL_CONFIG, Type.STRING, SecurityConfigs.DEFAULT_SECURITY_PROTOCOL, Importance.MEDIUM, SecurityConfigs.SECURITY_PROTOCOL_DOC) + .define(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG, Type.STRING, CommonClientConfigs.DEFAULT_SECURITY_PROTOCOL, Importance.MEDIUM, CommonClientConfigs.SECURITY_PROTOCOL_DOC) .define(SecurityConfigs.PRINCIPAL_BUILDER_CLASS_CONFIG, Type.CLASS, SecurityConfigs.DEFAULT_PRINCIPAL_BUILDER_CLASS, Importance.LOW, SecurityConfigs.PRINCIPAL_BUILDER_CLASS_DOC) .define(SecurityConfigs.SSL_PROTOCOL_CONFIG, Type.STRING, SecurityConfigs.DEFAULT_SSL_PROTOCOL, Importance.MEDIUM, SecurityConfigs.SSL_PROTOCOL_DOC) .define(SecurityConfigs.SSL_PROVIDER_CONFIG, Type.STRING, Importance.MEDIUM, SecurityConfigs.SSL_PROVIDER_DOC, false) diff --git a/clients/src/main/java/org/apache/kafka/clients/producer/ProducerConfig.java b/clients/src/main/java/org/apache/kafka/clients/producer/ProducerConfig.java index baa3d41..bd442b2 100644 --- a/clients/src/main/java/org/apache/kafka/clients/producer/ProducerConfig.java +++ b/clients/src/main/java/org/apache/kafka/clients/producer/ProducerConfig.java @@ -221,7 +221,7 @@ public class ProducerConfig extends AbstractConfig { MAX_IN_FLIGHT_REQUESTS_PER_CONNECTION_DOC) .define(KEY_SERIALIZER_CLASS_CONFIG, Type.CLASS, Importance.HIGH, KEY_SERIALIZER_CLASS_DOC) .define(VALUE_SERIALIZER_CLASS_CONFIG, Type.CLASS, Importance.HIGH, VALUE_SERIALIZER_CLASS_DOC) - .define(SecurityConfigs.SECURITY_PROTOCOL_CONFIG, Type.STRING, SecurityConfigs.DEFAULT_SECURITY_PROTOCOL, Importance.MEDIUM, SecurityConfigs.SECURITY_PROTOCOL_DOC) + .define(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG, Type.STRING, CommonClientConfigs.DEFAULT_SECURITY_PROTOCOL, Importance.MEDIUM, CommonClientConfigs.SECURITY_PROTOCOL_DOC) .define(SecurityConfigs.PRINCIPAL_BUILDER_CLASS_CONFIG, Type.CLASS, SecurityConfigs.DEFAULT_PRINCIPAL_BUILDER_CLASS, Importance.LOW, SecurityConfigs.PRINCIPAL_BUILDER_CLASS_DOC) .define(SecurityConfigs.SSL_PROTOCOL_CONFIG, Type.STRING, 
SecurityConfigs.DEFAULT_SSL_PROTOCOL, Importance.MEDIUM, SecurityConfigs.SSL_PROTOCOL_DOC) .define(SecurityConfigs.SSL_PROVIDER_CONFIG, Type.STRING, Importance.MEDIUM, SecurityConfigs.SSL_PROVIDER_DOC, false) diff --git a/clients/src/main/java/org/apache/kafka/common/config/SecurityConfigs.java b/clients/src/main/java/org/apache/kafka/common/config/SecurityConfigs.java index 1855399..c2f2181 100644 --- a/clients/src/main/java/org/apache/kafka/common/config/SecurityConfigs.java +++ b/clients/src/main/java/org/apache/kafka/common/config/SecurityConfigs.java @@ -21,10 +21,6 @@ public class SecurityConfigs { * NOTE: DO NOT CHANGE EITHER CONFIG NAMES AS THESE ARE PART OF THE PUBLIC API AND CHANGE WILL BREAK USER CODE. */ - public static final String SECURITY_PROTOCOL_CONFIG = "security.protocol"; - public static final String SECURITY_PROTOCOL_DOC = "Protocol used to communicate with brokers. Currently only PLAINTEXT and SSL are supported."; - public static final String DEFAULT_SECURITY_PROTOCOL = "PLAINTEXT"; - public static final String PRINCIPAL_BUILDER_CLASS_CONFIG = "principal.builder.class"; public static final String PRINCIPAL_BUILDER_CLASS_DOC = "principal builder to generate a java Principal. This config is optional for client."; public static final String DEFAULT_PRINCIPAL_BUILDER_CLASS = "org.apache.kafka.common.security.auth.DefaultPrincipalBuilder"; diff --git a/clients/src/main/java/org/apache/kafka/common/network/ByteBufferReceive.java b/clients/src/main/java/org/apache/kafka/common/network/ByteBufferReceive.java index 129ae82..0e37204 100644 --- a/clients/src/main/java/org/apache/kafka/common/network/ByteBufferReceive.java +++ b/clients/src/main/java/org/apache/kafka/common/network/ByteBufferReceive.java @@ -18,7 +18,6 @@ package org.apache.kafka.common.network; import java.io.IOException; import java.nio.ByteBuffer; -import java.nio.channels.ScatteringByteChannel; /** * A receive backed by an array of ByteBuffers @@ -48,8 +47,8 @@ public class ByteBufferReceive implements Receive { } @Override - public long readFrom(ScatteringByteChannel channel) throws IOException { - long read = channel.read(buffers); + public long readFrom(TransportLayer transportLayer) throws IOException { + long read = transportLayer.read(buffers); remaining += read; return read; } diff --git a/clients/src/main/java/org/apache/kafka/common/network/ByteBufferSend.java b/clients/src/main/java/org/apache/kafka/common/network/ByteBufferSend.java index c8213e1..3292278 100644 --- a/clients/src/main/java/org/apache/kafka/common/network/ByteBufferSend.java +++ b/clients/src/main/java/org/apache/kafka/common/network/ByteBufferSend.java @@ -3,9 +3,9 @@ * file distributed with this work for additional information regarding copyright ownership. The ASF licenses this file * to You under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the * License. You may obtain a copy of the License at - * + * * http://www.apache.org/licenses/LICENSE-2.0 - * + * * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the * specific language governing permissions and limitations under the License. 
@@ -15,7 +15,6 @@ package org.apache.kafka.common.network; import java.io.EOFException; import java.io.IOException; import java.nio.ByteBuffer; -import java.nio.channels.GatheringByteChannel; /** * A send backed by an array of byte buffers @@ -26,6 +25,7 @@ public class ByteBufferSend implements Send { protected final ByteBuffer[] buffers; private int remaining; private int size; + private boolean pending; public ByteBufferSend(int destination, ByteBuffer... buffers) { super(); @@ -43,7 +43,7 @@ public class ByteBufferSend implements Send { @Override public boolean completed() { - return remaining <= 0; + return remaining <= 0 && !pending; } @Override @@ -61,11 +61,12 @@ public class ByteBufferSend implements Send { } @Override - public long writeTo(GatheringByteChannel channel) throws IOException { - long written = channel.write(buffers); + public long writeTo(TransportLayer transportLayer) throws IOException { + long written = transportLayer.write(buffers); if (written < 0) throw new EOFException("This shouldn't happen."); remaining -= written; + pending = transportLayer.pending(); return written; } diff --git a/clients/src/main/java/org/apache/kafka/common/network/Channel.java b/clients/src/main/java/org/apache/kafka/common/network/Channel.java index 616bfcf..fc285b8 100644 --- a/clients/src/main/java/org/apache/kafka/common/network/Channel.java +++ b/clients/src/main/java/org/apache/kafka/common/network/Channel.java @@ -142,11 +142,10 @@ public class Channel { private boolean send(NetworkSend send) throws IOException { send.writeTo(transportLayer); - boolean sendComplete = send.remaining() == 0; - if (sendComplete) { + if (send.completed()) { transportLayer.removeInterestOps(SelectionKey.OP_WRITE); } - return sendComplete; + return send.completed(); } - } +} diff --git a/clients/src/main/java/org/apache/kafka/common/network/NetworkReceive.java b/clients/src/main/java/org/apache/kafka/common/network/NetworkReceive.java index fc0d168..e5bb2b4 100644 --- a/clients/src/main/java/org/apache/kafka/common/network/NetworkReceive.java +++ b/clients/src/main/java/org/apache/kafka/common/network/NetworkReceive.java @@ -3,9 +3,9 @@ * file distributed with this work for additional information regarding copyright ownership. The ASF licenses this file * to You under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the * License. You may obtain a copy of the License at - * + * * http://www.apache.org/licenses/LICENSE-2.0 - * + * * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the * specific language governing permissions and limitations under the License. 
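The ByteBufferSend change above adds a pending flag so that completed() only reports true once the transport layer has also flushed whatever it buffered internally; with SSL, wrap() can consume all of the source bytes while the encrypted record is still waiting in the transport's network buffer. A rough sketch of the idea, with a simplified transport interface assumed purely for illustration:

    import java.io.IOException;
    import java.nio.ByteBuffer;

    /* Sketch only: why send completion must consult the transport's pending state. */
    final class PendingSendSketch {
        interface SimpleTransport {
            long write(ByteBuffer[] srcs) throws IOException;
            boolean pending();                      // encrypted bytes not yet flushed to the socket
        }

        static boolean writeAndCheckComplete(SimpleTransport transport, ByteBuffer[] buffers) throws IOException {
            transport.write(buffers);
            long remaining = 0;
            for (ByteBuffer b : buffers)
                remaining += b.remaining();
            // The send is complete only once the application bytes are consumed
            // AND the transport has nothing left buffered for the wire.
            return remaining == 0 && !transport.pending();
        }
    }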
@@ -15,7 +15,6 @@ package org.apache.kafka.common.network; import java.io.EOFException; import java.io.IOException; import java.nio.ByteBuffer; -import java.nio.channels.ScatteringByteChannel; /** * A size delimited Receive that consists of a 4 byte network-ordered size N followed by N bytes of content @@ -54,10 +53,10 @@ public class NetworkReceive implements Receive { } @Override - public long readFrom(ScatteringByteChannel channel) throws IOException { + public long readFrom(TransportLayer transportLayer) throws IOException { int read = 0; if (size.hasRemaining()) { - int bytesRead = channel.read(size); + int bytesRead = transportLayer.read(size); if (bytesRead < 0) throw new EOFException(); read += bytesRead; @@ -70,7 +69,7 @@ public class NetworkReceive implements Receive { } } if (buffer != null) { - int bytesRead = channel.read(buffer); + int bytesRead = transportLayer.read(buffer); if (bytesRead < 0) throw new EOFException(); read += bytesRead; diff --git a/clients/src/main/java/org/apache/kafka/common/network/PlainTextTransportLayer.java b/clients/src/main/java/org/apache/kafka/common/network/PlainTextTransportLayer.java index f730c25..dbd35db 100644 --- a/clients/src/main/java/org/apache/kafka/common/network/PlainTextTransportLayer.java +++ b/clients/src/main/java/org/apache/kafka/common/network/PlainTextTransportLayer.java @@ -48,6 +48,31 @@ public class PlainTextTransportLayer implements TransportLayer { this.socketChannel = (SocketChannel) key.channel(); } + public boolean isReady() { + return true; + } + + public void finishConnect() throws IOException { + socketChannel.finishConnect(); + int ops = key.interestOps(); + ops &= ~SelectionKey.OP_CONNECT; + ops |= SelectionKey.OP_READ; + key.interestOps(ops); + } + + public void disconnect() { + key.cancel(); + } + + public SocketChannel socketChannel() { + return socketChannel; + } + + + public boolean isOpen() { + return socketChannel.isOpen(); + } + /** * Closes this channel * @@ -58,84 +83,115 @@ public class PlainTextTransportLayer implements TransportLayer { socketChannel.close(); } - public boolean isOpen() { - return socketChannel.isOpen(); - } - public void disconnect() { - key.cancel(); - } /** - * Writes a sequence of bytes to this channel from the given buffer. + * There won't be any pending bytes to written socketChannel once write method is called. + * This will always return false. */ - public int write(ByteBuffer src) throws IOException { - return socketChannel.write(src); + public boolean pending() { + return false; } - public long write(ByteBuffer[] srcs) throws IOException { - return socketChannel.write(srcs); - } - public long write(ByteBuffer[] srcs, int offset, int length) throws IOException { - return socketChannel.write(srcs, offset, length); - } + /** + * Performs SSL handshake hence is a no-op for the non-secure + * implementation + * @throws IOException + */ + public void handshake() throws IOException {} + + + /** + * Reads a sequence of bytes from this channel into the given buffer. + * + * @param dst The buffer into which bytes are to be transferred + * @return The number of bytes read, possible zero or -1 if the channel has reached end-of-stream + * @throws IOException if some other I/O error occurs + */ public int read(ByteBuffer dst) throws IOException { return socketChannel.read(dst); } + /** + * Reads a sequence of bytes from this channel into the given buffers. + * + * @param dsts - The buffers into which bytes are to be transferred. 
+ * @return The number of bytes read, possibly zero, or -1 if the channel has reached end-of-stream. + * @throws IOException if some other I/O error occurs + */ public long read(ByteBuffer[] dsts) throws IOException { return socketChannel.read(dsts); } + /** + * Reads a sequence of bytes from this channel into a subsequence of the given buffers. + * @param dsts - The buffers into which bytes are to be transferred + * @param offset - The offset within the buffer array of the first buffer into which bytes are to be transferred; must be non-negative and no larger than dsts.length. + * @param length - The maximum number of buffers to be accessed; must be non-negative and no larger than dsts.length - offset + * @returns The number of bytes read, possibly zero, or -1 if the channel has reached end-of-stream. + * @throws IOException if some other I/O error occurs + */ public long read(ByteBuffer[] dsts, int offset, int length) throws IOException { return socketChannel.read(dsts, offset, length); } - public boolean isReady() { - return true; - } - - public SocketChannel socketChannel() { - return socketChannel; - } - - public void finishConnect() throws IOException { - socketChannel.finishConnect(); - int ops = key.interestOps(); - ops &= ~SelectionKey.OP_CONNECT; - ops |= SelectionKey.OP_READ; - key.interestOps(ops); + /** + * Writes a sequence of bytes to this channel from the given buffer. + * + * @param src The buffer from which bytes are to be retrieved + * @returns The number of bytes read, possibly zero, or -1 if the channel has reached end-of-stream + * @throws IOException If some other I/O error occurs + */ + public int write(ByteBuffer src) throws IOException { + return socketChannel.write(src); } /** - * Performs SSL handshake hence is a no-op for the non-secure - * implementation - * @throws IOException + * Writes a sequence of bytes to this channel from the given buffer. + * + * @param src The buffer from which bytes are to be retrieved + * @returns The number of bytes read, possibly zero, or -1 if the channel has reached end-of-stream + * @throws IOException If some other I/O error occurs */ - public void handshake() throws IOException {} - - public DataInputStream inStream() throws IOException { - if (inStream == null) - this.inStream = new DataInputStream(socketChannel.socket().getInputStream()); - return inStream; + public long write(ByteBuffer[] srcs) throws IOException { + return socketChannel.write(srcs); } - public DataOutputStream outStream() throws IOException { - if (outStream == null) - this.outStream = new DataOutputStream(socketChannel.socket().getOutputStream()); - return outStream; + /** + * Writes a sequence of bytes to this channel from the subsequence of the given buffers. + * + * @param srcs The buffers from which bytes are to be retrieved + * @param offset The offset within the buffer array of the first buffer from which bytes are to be retrieved; must be non-negative and no larger than srcs.length. + * @param length - The maximum number of buffers to be accessed; must be non-negative and no larger than srcs.length - offset. + * @return returns no.of bytes written , possibly zero. + * @throws IOException If some other I/O error occurs + */ + public long write(ByteBuffer[] srcs, int offset, int length) throws IOException { + return socketChannel.write(srcs, offset, length); } + /** + * Rerturns ANONYMOUS as Principal. + */ public Principal peerPrincipal() throws IOException { return principal; } + /** + * Adds the interestOps to selectionKey. 
+ * @param SelectionKey interestOps + */ public void addInterestOps(int ops) { key.interestOps(key.interestOps() | ops); + } + /** + * Removes the interestOps from selectionKey. + * @param SelectionKey interestOps + */ public void removeInterestOps(int ops) { key.interestOps(key.interestOps() & ~ops); } diff --git a/clients/src/main/java/org/apache/kafka/common/network/Receive.java b/clients/src/main/java/org/apache/kafka/common/network/Receive.java index 4e33078..0862eff 100644 --- a/clients/src/main/java/org/apache/kafka/common/network/Receive.java +++ b/clients/src/main/java/org/apache/kafka/common/network/Receive.java @@ -18,7 +18,6 @@ package org.apache.kafka.common.network; import java.io.IOException; import java.nio.ByteBuffer; -import java.nio.channels.ScatteringByteChannel; /** * This interface models the in-progress reading of data from a channel to a source identified by an integer id @@ -42,10 +41,10 @@ public interface Receive { /** * Read bytes into this receive from the given channel - * @param channel The channel to read from + * @param transportLayer The transportLayer to read from * @return The number of bytes read * @throws IOException If the reading fails */ - public long readFrom(ScatteringByteChannel channel) throws IOException; + public long readFrom(TransportLayer transportLayer) throws IOException; } diff --git a/clients/src/main/java/org/apache/kafka/common/network/SSLChannelBuilder.java b/clients/src/main/java/org/apache/kafka/common/network/SSLChannelBuilder.java index 8766824..2d89497 100644 --- a/clients/src/main/java/org/apache/kafka/common/network/SSLChannelBuilder.java +++ b/clients/src/main/java/org/apache/kafka/common/network/SSLChannelBuilder.java @@ -15,8 +15,6 @@ package org.apache.kafka.common.network; import java.nio.channels.SelectionKey; import java.nio.channels.SocketChannel; import java.util.Map; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.Executors; import org.apache.kafka.common.security.auth.PrincipalBuilder; import org.apache.kafka.common.config.SecurityConfigs; @@ -28,12 +26,10 @@ import org.slf4j.LoggerFactory; public class SSLChannelBuilder implements ChannelBuilder { private static final Logger log = LoggerFactory.getLogger(SSLChannelBuilder.class); private SSLFactory sslFactory; - private ExecutorService executorService; private PrincipalBuilder principalBuilder; public void configure(Map configs) throws KafkaException { try { - this.executorService = Executors.newScheduledThreadPool(1); this.sslFactory = new SSLFactory(SSLFactory.Mode.CLIENT); this.sslFactory.configure(configs); this.principalBuilder = (PrincipalBuilder) Utils.newInstance((Class) configs.get(SecurityConfigs.PRINCIPAL_BUILDER_CLASS_CONFIG)); @@ -49,8 +45,7 @@ public class SSLChannelBuilder implements ChannelBuilder { SocketChannel socketChannel = (SocketChannel) key.channel(); SSLTransportLayer transportLayer = new SSLTransportLayer(key, sslFactory.createSSLEngine(socketChannel.socket().getInetAddress().getHostName(), - socketChannel.socket().getPort()), - executorService); + socketChannel.socket().getPort())); Authenticator authenticator = new DefaultAuthenticator(transportLayer, this.principalBuilder); channel = new Channel(id, transportLayer, authenticator); } catch (Exception e) { @@ -61,7 +56,6 @@ public class SSLChannelBuilder implements ChannelBuilder { } public void close() { - this.executorService.shutdown(); this.principalBuilder.close(); } } diff --git a/clients/src/main/java/org/apache/kafka/common/network/SSLFactory.java 
b/clients/src/main/java/org/apache/kafka/common/network/SSLFactory.java index 557c5f9..b669069 100644 --- a/clients/src/main/java/org/apache/kafka/common/network/SSLFactory.java +++ b/clients/src/main/java/org/apache/kafka/common/network/SSLFactory.java @@ -136,7 +136,7 @@ public class SSLFactory implements Configurable { sslEngine.setUseClientMode(false); if (needClientAuth) sslEngine.setNeedClientAuth(needClientAuth); - else if (wantClientAuth) + else sslEngine.setNeedClientAuth(wantClientAuth); } else { sslEngine.setUseClientMode(true); diff --git a/clients/src/main/java/org/apache/kafka/common/network/SSLTransportLayer.java b/clients/src/main/java/org/apache/kafka/common/network/SSLTransportLayer.java index f79b5ef..eb898b4 100644 --- a/clients/src/main/java/org/apache/kafka/common/network/SSLTransportLayer.java +++ b/clients/src/main/java/org/apache/kafka/common/network/SSLTransportLayer.java @@ -33,10 +33,6 @@ import javax.net.ssl.SSLException; import javax.net.ssl.SSLSession; import javax.net.ssl.SSLPeerUnverifiedException; -import java.io.DataInputStream; -import java.io.DataOutputStream; -import java.util.concurrent.ExecutorService; - import org.apache.kafka.common.utils.Utils; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -59,23 +55,23 @@ public class SSLTransportLayer implements TransportLayer { private ByteBuffer netWriteBuffer; private ByteBuffer appReadBuffer; private ByteBuffer emptyBuf = ByteBuffer.allocate(0); - private DataInputStream inStream; - private DataOutputStream outStream; - private ExecutorService executorService; private int interestOps; private int socketSendBufferSize; - public SSLTransportLayer(SelectionKey key, SSLEngine sslEngine, ExecutorService executorService) throws IOException { + public SSLTransportLayer(SelectionKey key, SSLEngine sslEngine) throws IOException { this.key = key; this.socketChannel = (SocketChannel) key.channel(); this.sslEngine = sslEngine; - this.executorService = executorService; this.netReadBuffer = ByteBuffer.allocateDirect(packetBufferSize()); this.netWriteBuffer = ByteBuffer.allocateDirect(packetBufferSize()); this.appReadBuffer = ByteBuffer.allocateDirect(applicationBufferSize()); this.socketSendBufferSize = this.socketChannel.socket().getSendBufferSize(); } + /** + * starts sslEngine handshake process and sets the selectionKey interestOps based + * sslEngine handshakeStatus. + */ private void startHandshake() throws IOException { netWriteBuffer.position(0); netWriteBuffer.limit(0); @@ -91,10 +87,14 @@ public class SSLTransportLayer implements TransportLayer { key.interestOps(key.interestOps() | SelectionKey.OP_WRITE); } - public SocketChannel socketChannel() { - return socketChannel; + + public boolean isReady() { + return handshakeComplete; } + /** + * does socketChannel.finishConnect() + */ public void finishConnect() throws IOException { socketChannel.finishConnect(); removeInterestOps(SelectionKey.OP_CONNECT); @@ -103,12 +103,54 @@ public class SSLTransportLayer implements TransportLayer { startHandshake(); } + /** + * disconnects selectionKey. + */ + public void disconnect() { + key.cancel(); + } + + public SocketChannel socketChannel() { + return socketChannel; + } + public boolean isOpen() { return socketChannel.isOpen(); } - public void disconnect() { - key.cancel(); + /** + * Sends a SSL close message and closes socketChannel. 
+ * @throws IOException if an I/O error occurs + * @throws IOException if there is data on the outgoing network buffer and we are unable to flush it + */ + public void close() throws IOException { + if (closing) return; + closing = true; + sslEngine.closeOutbound(); + + if (!flush(netWriteBuffer)) { + throw new IOException("Remaining data in the network buffer, can't send SSL close message."); + } + //prep the buffer for the close message + netWriteBuffer.clear(); + //perform the close, since we called sslEngine.closeOutbound + SSLEngineResult handshake = sslEngine.wrap(emptyBuf, netWriteBuffer); + //we should be in a close state + if (handshake.getStatus() != SSLEngineResult.Status.CLOSED) { + throw new IOException("Invalid close state, will not send network data."); + } + netWriteBuffer.flip(); + flush(netWriteBuffer); + socketChannel.socket().close(); + socketChannel.close(); + closed = !netWriteBuffer.hasRemaining() && (handshake.getHandshakeStatus() != HandshakeStatus.NEED_WRAP); + } + + /** + * returns true if there are any pending contents in netWriteBuffer + */ + public boolean pending() { + return netWriteBuffer.hasRemaining(); } /** @@ -150,9 +192,9 @@ public class SSLTransportLayer implements TransportLayer { if (handshakeResult.getStatus() == Status.BUFFER_OVERFLOW) { int currentPacketBufferSize = packetBufferSize(); netWriteBuffer = Utils.ensureCapacity(netWriteBuffer, currentPacketBufferSize); - if (netWriteBuffer.position() > currentPacketBufferSize) { + if (netWriteBuffer.position() >= currentPacketBufferSize) { throw new IllegalStateException("Buffer overflow when available data (" + netWriteBuffer.position() + - ") > network buffer size (" + currentPacketBufferSize + ")"); + ") >= network buffer size (" + currentPacketBufferSize + ")"); } } else if (handshakeResult.getStatus() == Status.BUFFER_UNDERFLOW) { throw new IllegalStateException("Should not have received BUFFER_UNDERFLOW during handshake WRAP."); @@ -161,7 +203,7 @@ public class SSLTransportLayer implements TransportLayer { } //fall down to NEED_UNWRAP on the same call, will result in a //BUFFER_UNDERFLOW if it needs data - if (handshakeStatus != HandshakeStatus.NEED_UNWRAP || !flush(netWriteBuffer)) { //check for write bit + if (handshakeStatus != HandshakeStatus.NEED_UNWRAP || (!write && !flush(netWriteBuffer))) { key.interestOps(key.interestOps() | SelectionKey.OP_WRITE); break; } @@ -211,7 +253,7 @@ public class SSLTransportLayer implements TransportLayer { /** - * Executes the SSLEngine tasks needed on the executorservice thread. + * Executes the SSLEngine tasks needed. * @return HandshakeStatus */ private HandshakeStatus runDelegatedTasks() { @@ -253,6 +295,8 @@ public class SSLTransportLayer implements TransportLayer { * @throws IOException */ private SSLEngineResult handshakeWrap(Boolean doWrite) throws IOException { + if (netWriteBuffer.hasRemaining()) + throw new IllegalStateException("handshakeWrap called with netWriteBuffer not empty"); //this should never be called with a network buffer that contains data //so we can clear it here. netWriteBuffer.clear(); @@ -304,37 +348,7 @@ public class SSLTransportLayer implements TransportLayer { } - /** - * Sends a SSL close message, will not physically close the connection here.
    - * @throws IOException if an I/O error occurs - * @throws IOException if there is data on the outgoing network buffer and we are unable to flush it - */ - public void close() throws IOException { - if (closing) return; - closing = true; - sslEngine.closeOutbound(); - if (!flush(netWriteBuffer)) { - throw new IOException("Remaining data in the network buffer, can't send SSL close message."); - } - //prep the buffer for the close message - netWriteBuffer.clear(); - //perform the close, since we called sslEngine.closeOutbound - SSLEngineResult handshake = sslEngine.wrap(emptyBuf, netWriteBuffer); - //we should be in a close state - if (handshake.getStatus() != SSLEngineResult.Status.CLOSED) { - throw new IOException("Invalid close state, will not send network data."); - } - netWriteBuffer.flip(); - flush(netWriteBuffer); - socketChannel.socket().close(); - socketChannel.close(); - closed = !netWriteBuffer.hasRemaining() && (handshake.getHandshakeStatus() != HandshakeStatus.NEED_WRAP); - } - - public boolean isReady() { - return handshakeComplete; - } /** * Reads a sequence of bytes from this channel into the given buffer. @@ -363,58 +377,87 @@ public class SSLTransportLayer implements TransportLayer { do { netReadBuffer.flip(); - SSLEngineResult unwrap = sslEngine.unwrap(netReadBuffer, appReadBuffer); + SSLEngineResult unwrapResult = sslEngine.unwrap(netReadBuffer, appReadBuffer); netReadBuffer.compact(); // handle ssl renegotiation. - if (unwrap.getHandshakeStatus() != HandshakeStatus.NOT_HANDSHAKING) { + if (unwrapResult.getHandshakeStatus() != HandshakeStatus.NOT_HANDSHAKING) { handshake(); break; } - if (unwrap.getStatus() == Status.OK) { + if (unwrapResult.getStatus() == Status.OK) { read += readFromAppBuffer(dst); - } else if (unwrap.getStatus() == Status.BUFFER_OVERFLOW) { + } else if (unwrapResult.getStatus() == Status.BUFFER_OVERFLOW) { int currentApplicationBufferSize = applicationBufferSize(); appReadBuffer = Utils.ensureCapacity(appReadBuffer, currentApplicationBufferSize); if (appReadBuffer.position() >= currentApplicationBufferSize) { throw new IllegalStateException("Buffer overflow when available data (" + appReadBuffer.position() + - ") > application buffer size (" + currentApplicationBufferSize + ")"); + ") >= application buffer size (" + currentApplicationBufferSize + ")"); } - break; - } else if (unwrap.getStatus() == Status.BUFFER_UNDERFLOW) { + if (dst.hasRemaining()) + read += readFromAppBuffer(dst); + else + break; + } else if (unwrapResult.getStatus() == Status.BUFFER_UNDERFLOW) { int currentPacketBufferSize = packetBufferSize(); netReadBuffer = Utils.ensureCapacity(netReadBuffer, currentPacketBufferSize); break; - } else if (unwrap.getStatus() == Status.CLOSED) { + } else if (unwrapResult.getStatus() == Status.CLOSED) { throw new EOFException(); } - } while(netReadBuffer.hasRemaining()); + } while(netReadBuffer.position() != 0); } return read; } + /** + * Reads a sequence of bytes from this channel into the given buffers. + * + * @param dsts - The buffers into which bytes are to be transferred. + * @return The number of bytes read, possibly zero, or -1 if the channel has reached end-of-stream. + * @throws IOException if some other I/O error occurs + */ public long read(ByteBuffer[] dsts) throws IOException { return read(dsts, 0, dsts.length); } + + /** + * Reads a sequence of bytes from this channel into a subsequence of the given buffers. 
+ * @param dsts - The buffers into which bytes are to be transferred + * @param offset - The offset within the buffer array of the first buffer into which bytes are to be transferred; must be non-negative and no larger than dsts.length. + * @param length - The maximum number of buffers to be accessed; must be non-negative and no larger than dsts.length - offset + * @returns The number of bytes read, possibly zero, or -1 if the channel has reached end-of-stream. + * @throws IOException if some other I/O error occurs + */ public long read(ByteBuffer[] dsts, int offset, int length) throws IOException { + if ((offset < 0) || (length < 0) || (offset > dsts.length - length)) + throw new IndexOutOfBoundsException(); + int totalRead = 0; - for (int i = offset; i < length; i++) { - int read = read(dsts[i]); - if (read > 0) { - totalRead += read; + int i = offset; + while (i < length) { + if (dsts[i].hasRemaining()) { + int read = read(dsts[i]); + if (read > 0) { + totalRead += read; + } + } + if (!dsts[i].hasRemaining()) { + i++; } } return totalRead; } + /** * Writes a sequence of bytes to this channel from the given buffer. * * @param src The buffer from which bytes are to be retrieved - * @return The number of bytes written, possibly zero + * @returns The number of bytes read, possibly zero, or -1 if the channel has reached end-of-stream * @throws IOException If some other I/O error occurs */ public int write(ByteBuffer src) throws IOException { @@ -424,10 +467,12 @@ public class SSLTransportLayer implements TransportLayer { if (!flush(netWriteBuffer)) return written; + netWriteBuffer.clear(); SSLEngineResult wrap = sslEngine.wrap(src, netWriteBuffer); netWriteBuffer.flip(); + //handle ssl renegotiation if (wrap.getHandshakeStatus() != HandshakeStatus.NOT_HANDSHAKING) { handshake(); return written; @@ -439,8 +484,8 @@ public class SSLTransportLayer implements TransportLayer { } else if (wrap.getStatus() == Status.BUFFER_OVERFLOW) { int currentPacketBufferSize = packetBufferSize(); netWriteBuffer = Utils.ensureCapacity(netReadBuffer, packetBufferSize()); - if (netWriteBuffer.position() > currentPacketBufferSize) - throw new IllegalStateException("SSL BUFFER_OVERFLOW when available data (" + netWriteBuffer.position() + ") > network buffer size (" + currentPacketBufferSize + ")"); + if (netWriteBuffer.position() >= currentPacketBufferSize) + throw new IllegalStateException("SSL BUFFER_OVERFLOW when available data size (" + netWriteBuffer.position() + ") >= network buffer size (" + currentPacketBufferSize + ")"); } else if (wrap.getStatus() == Status.BUFFER_UNDERFLOW) { throw new IllegalStateException("SSL BUFFER_UNDERFLOW during write"); } else if (wrap.getStatus() == Status.CLOSED) { @@ -449,45 +494,52 @@ public class SSLTransportLayer implements TransportLayer { return written; } + /** + * Writes a sequence of bytes to this channel from the subsequence of the given buffers. + * + * @param srcs The buffers from which bytes are to be retrieved + * @param offset The offset within the buffer array of the first buffer from which bytes are to be retrieved; must be non-negative and no larger than srcs.length. + * @param length - The maximum number of buffers to be accessed; must be non-negative and no larger than srcs.length - offset. + * @return returns no.of bytes written , possibly zero. 
+ * @throws IOException If some other I/O error occurs + */ public long write(ByteBuffer[] srcs, int offset, int length) throws IOException { - long totalWritten = 0; + if ((offset < 0) || (length < 0) || (offset > srcs.length - length)) + throw new IndexOutOfBoundsException(); + + int totalWritten = 0; int i = offset; while (i < length) { - if (srcs[i].hasRemaining()) { + if (srcs[i].hasRemaining() || pending()) { int written = write(srcs[i]); if (written > 0) { totalWritten += written; } } - i++; + + if (!srcs[i].hasRemaining()) { + i++; + } else { + // if we are unable to write the current buffer to socketChannel we should break, + // as we might have reached max socket send buffer size. + break; + } } return totalWritten; } + /** + * Writes a sequence of bytes to this channel from the given buffers. + * + * @param srcs The buffers from which bytes are to be retrieved + * @return returns no.of bytes consumed by SSLEngine.wrap , possibly zero. + * @throws IOException If some other I/O error occurs + */ + public long write(ByteBuffer[] srcs) throws IOException { return write(srcs, 0, srcs.length); } - /** - * socket's InputStream as DataInputStream - * @return DataInputStream - */ - public DataInputStream inStream() throws IOException { - if (inStream == null) - this.inStream = new DataInputStream(socketChannel.socket().getInputStream()); - return inStream; - } - - - /** - * socket's OutputStream as DataOutputStream - * @return DataInputStream - */ - public DataOutputStream outStream() throws IOException { - if (outStream == null) - this.outStream = new DataOutputStream(socketChannel.socket().getOutputStream()); - return outStream; - } /** * SSLSession's peerPrincipal for the remote host. diff --git a/clients/src/main/java/org/apache/kafka/common/network/Selector.java b/clients/src/main/java/org/apache/kafka/common/network/Selector.java index de0bbe9..d621f06 100644 --- a/clients/src/main/java/org/apache/kafka/common/network/Selector.java +++ b/clients/src/main/java/org/apache/kafka/common/network/Selector.java @@ -16,7 +16,6 @@ import java.io.EOFException; import java.io.IOException; import java.net.ConnectException; import java.net.InetSocketAddress; -import java.net.Socket; import java.nio.channels.CancelledKeyException; import java.nio.channels.SelectionKey; import java.nio.channels.SocketChannel; diff --git a/clients/src/main/java/org/apache/kafka/common/network/Send.java b/clients/src/main/java/org/apache/kafka/common/network/Send.java index 5d321a0..9cc2ada 100644 --- a/clients/src/main/java/org/apache/kafka/common/network/Send.java +++ b/clients/src/main/java/org/apache/kafka/common/network/Send.java @@ -3,9 +3,9 @@ * file distributed with this work for additional information regarding copyright ownership. The ASF licenses this file * to You under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the * License. You may obtain a copy of the License at - * + * * http://www.apache.org/licenses/LICENSE-2.0 - * + * * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the * specific language governing permissions and limitations under the License. 
@@ -14,7 +14,6 @@ package org.apache.kafka.common.network; import java.io.IOException; import java.nio.ByteBuffer; -import java.nio.channels.GatheringByteChannel; /** * This interface models the in-progress sending of data to a destination identified by an integer id. @@ -44,10 +43,10 @@ public interface Send { /** * Write some as-yet unwritten bytes from this send to the provided channel. It may take multiple calls for the send * to be completely written - * @param channel The channel to write to + * @param transportLayer The transportLayer to write to * @return The number of bytes written * @throws IOException If the write fails */ - public long writeTo(GatheringByteChannel channel) throws IOException; + public long writeTo(TransportLayer transportLayer) throws IOException; } diff --git a/clients/src/main/java/org/apache/kafka/common/network/TransportLayer.java b/clients/src/main/java/org/apache/kafka/common/network/TransportLayer.java index 8fdaae2..dd544a6 100644 --- a/clients/src/main/java/org/apache/kafka/common/network/TransportLayer.java +++ b/clients/src/main/java/org/apache/kafka/common/network/TransportLayer.java @@ -22,17 +22,12 @@ package org.apache.kafka.common.network; */ import java.io.IOException; import java.nio.ByteBuffer; -import java.nio.channels.ScatteringByteChannel; -import java.nio.channels.GatheringByteChannel; import java.nio.channels.SocketChannel; -import java.io.DataInputStream; -import java.io.DataOutputStream; - import java.security.Principal; -public interface TransportLayer extends ScatteringByteChannel, GatheringByteChannel { +public interface TransportLayer { /** * Returns true if the channel has handshake and authenticaiton done. @@ -55,6 +50,18 @@ public interface TransportLayer extends ScatteringByteChannel, GatheringByteChan SocketChannel socketChannel(); /** + * Returns true if the socketChannel is open. + */ + boolean isOpen(); + + public void close() throws IOException; + + /** + * Returns true if there are any pending bytes that need to be written to the channel. + */ + boolean pending(); + + /** * Performs SSL handshake hence is a no-op for the non-secure * implementation * @throws IOException @@ -62,9 +69,25 @@ public interface TransportLayer extends ScatteringByteChannel, GatheringByteChan void handshake() throws IOException; - DataInputStream inStream() throws IOException; + /** + * Reads a sequence of bytes from the channel into the given buffer. + */ + public int read(ByteBuffer dst) throws IOException; + + public long read(ByteBuffer[] dsts) throws IOException; + + public long read(ByteBuffer[] dsts, int offset, int length) throws IOException; + + /** + * Writes a sequence of bytes to this channel from the given buffer. 
+ */ + public int write(ByteBuffer src) throws IOException; + + public long write(ByteBuffer[] srcs) throws IOException; + + public long write(ByteBuffer[] srcs, int offset, int length) throws IOException; + - DataOutputStream outStream() throws IOException; /** * returns SSLSession.getPeerPrinicpal if SSLTransportLayer used diff --git a/clients/src/test/java/org/apache/kafka/clients/producer/KafkaProducerTest.java b/clients/src/test/java/org/apache/kafka/clients/producer/KafkaProducerTest.java index f3f8334..a38f189 100644 --- a/clients/src/test/java/org/apache/kafka/clients/producer/KafkaProducerTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/producer/KafkaProducerTest.java @@ -18,12 +18,16 @@ package org.apache.kafka.clients.producer; import org.apache.kafka.common.KafkaException; import org.apache.kafka.common.serialization.ByteArraySerializer; +import org.apache.kafka.common.config.SecurityConfigs; +import org.apache.kafka.clients.CommonClientConfigs; import org.apache.kafka.test.MockMetricsReporter; import org.apache.kafka.test.MockSerializer; import org.junit.Assert; import org.junit.Test; import java.util.Properties; +import java.util.Map; +import java.util.HashMap; public class KafkaProducerTest { @@ -50,17 +54,18 @@ public class KafkaProducerTest { } @Test - public void testSerializerClose() { - Properties props = new Properties(); - props.setProperty(ProducerConfig.CLIENT_ID_CONFIG, "testConstructorClose"); - props.setProperty(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9999"); - props.setProperty(ProducerConfig.METRIC_REPORTER_CLASSES_CONFIG, MockMetricsReporter.class.getName()); - + public void testSerializerClose() throws Exception { + Map configs = new HashMap(); + configs.put(ProducerConfig.CLIENT_ID_CONFIG, "testConstructorClose"); + configs.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9999"); + configs.put(ProducerConfig.METRIC_REPORTER_CLASSES_CONFIG, MockMetricsReporter.class.getName()); + configs.put(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG, CommonClientConfigs.DEFAULT_SECURITY_PROTOCOL); + configs.put(SecurityConfigs.PRINCIPAL_BUILDER_CLASS_CONFIG, Class.forName(SecurityConfigs.DEFAULT_PRINCIPAL_BUILDER_CLASS)); final int oldInitCount = MockSerializer.INIT_COUNT.get(); final int oldCloseCount = MockSerializer.CLOSE_COUNT.get(); KafkaProducer producer = new KafkaProducer( - props, new MockSerializer(), new MockSerializer()); + configs, new MockSerializer(), new MockSerializer()); Assert.assertEquals(oldInitCount + 2, MockSerializer.INIT_COUNT.get()); Assert.assertEquals(oldCloseCount, MockSerializer.CLOSE_COUNT.get()); diff --git a/clients/src/test/java/org/apache/kafka/common/network/EchoServer.java b/clients/src/test/java/org/apache/kafka/common/network/EchoServer.java index ce4c201..37d1706 100644 --- a/clients/src/test/java/org/apache/kafka/common/network/EchoServer.java +++ b/clients/src/test/java/org/apache/kafka/common/network/EchoServer.java @@ -12,7 +12,6 @@ */ package org.apache.kafka.common.network; -import org.apache.kafka.common.config.SecurityConfigs; import org.apache.kafka.common.protocol.SecurityProtocol; import javax.net.ssl.SSLContext; @@ -42,8 +41,8 @@ class EchoServer extends Thread { private final AtomicBoolean renegotiate = new AtomicBoolean(); public EchoServer(Map configs) throws Exception { - this.protocol = configs.containsKey(SecurityConfigs.SECURITY_PROTOCOL_CONFIG) ? 
- SecurityProtocol.valueOf((String) configs.get(SecurityConfigs.SECURITY_PROTOCOL_CONFIG)) : SecurityProtocol.PLAINTEXT; + this.protocol = configs.containsKey("security.protocol") ? + SecurityProtocol.valueOf((String) configs.get("security.protocol")) : SecurityProtocol.PLAINTEXT; if (protocol == SecurityProtocol.SSL) { this.sslFactory = new SSLFactory(SSLFactory.Mode.SERVER); this.sslFactory.configure(configs); diff --git a/clients/src/test/java/org/apache/kafka/common/network/SSLSelectorTest.java b/clients/src/test/java/org/apache/kafka/common/network/SSLSelectorTest.java index 5ec02ef..a5351ce 100644 --- a/clients/src/test/java/org/apache/kafka/common/network/SSLSelectorTest.java +++ b/clients/src/test/java/org/apache/kafka/common/network/SSLSelectorTest.java @@ -17,22 +17,13 @@ import static org.junit.Assert.assertTrue; import java.util.LinkedHashMap; import java.util.Map; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.Executors; -import java.util.concurrent.Semaphore; import java.io.IOException; import java.net.InetSocketAddress; import java.nio.ByteBuffer; -import java.nio.channels.SocketChannel; -import java.nio.channels.SelectionKey; -import javax.net.ssl.SSLEngine; -import javax.net.ssl.SSLEngineResult; import org.apache.kafka.common.config.SecurityConfigs; -import org.apache.kafka.common.KafkaException; import org.apache.kafka.common.metrics.Metrics; -import org.apache.kafka.common.security.auth.PrincipalBuilder; import org.apache.kafka.common.utils.MockTime; import org.apache.kafka.common.utils.Utils; import org.apache.kafka.test.TestSSLUtils; @@ -151,18 +142,6 @@ public class SSLSelectorTest { assertEquals("", blockingRequest(node, "")); } - /** - * Test sending an small string - */ - @Test - public void testIncompleteSend() throws Exception { - int bufferSize = 16391; - int node = 0; - InetSocketAddress addr = new InetSocketAddress("localhost", server.port); - selector.connect(node, addr, bufferSize, bufferSize); - String requestPrefix = TestUtils.randomString(bufferSize); - assertEquals(requestPrefix, blockingRequest(node, requestPrefix)); - } @Test public void testMute() throws Exception { @@ -193,7 +172,6 @@ public class SSLSelectorTest { public void testRenegotiation() throws Exception { int reqs = 500; int node = 0; - // create connections InetSocketAddress addr = new InetSocketAddress("localhost", server.port); selector.connect(node, addr, BUFFER_SIZE, BUFFER_SIZE); @@ -217,9 +195,9 @@ public class SSLSelectorTest { // handle any responses we may have gotten for (NetworkReceive receive : selector.completedReceives()) { String[] pieces = asString(receive).split("-"); - assertEquals("Receive text should be in the form 'conn-counter'", 2, pieces.length); + assertEquals("Should be in the form 'conn-counter'", 2, pieces.length); assertEquals("Check the source", receive.source(), Integer.parseInt(pieces[0])); - assertEquals("Receive ByteBuffer position should be at 0", 0, receive.payload().position()); + assertEquals("Check that the receive has kindly been rewound", 0, receive.payload().position()); assertEquals("Check the request counter", responses, Integer.parseInt(pieces[1])); responses++; } diff --git a/clients/src/test/java/org/apache/kafka/test/TestSSLUtils.java b/clients/src/test/java/org/apache/kafka/test/TestSSLUtils.java index 093f33b..4edbe36 100644 --- a/clients/src/test/java/org/apache/kafka/test/TestSSLUtils.java +++ b/clients/src/test/java/org/apache/kafka/test/TestSSLUtils.java @@ -19,6 +19,7 @@ package org.apache.kafka.test; import 
org.apache.kafka.common.config.SecurityConfigs; import org.apache.kafka.common.network.SSLFactory; +import org.apache.kafka.clients.CommonClientConfigs; import java.io.File; import java.io.FileOutputStream; @@ -44,10 +45,6 @@ import org.bouncycastle.operator.DefaultDigestAlgorithmIdentifierFinder; import org.bouncycastle.operator.DefaultSignatureAlgorithmIdentifierFinder; import org.bouncycastle.operator.bc.BcRSAContentSignerBuilder; - - - - import java.util.Date; import java.util.HashMap; import java.util.Map; @@ -174,7 +171,7 @@ public class TestSSLUtils { public static Map createSSLConfig(SSLFactory.Mode mode, File keyStoreFile, String password, String keyPassword, File trustStoreFile, String trustStorePassword, boolean useClientCert) { Map sslConfigs = new HashMap(); - sslConfigs.put(SecurityConfigs.SECURITY_PROTOCOL_CONFIG, "SSL"); // kafka security protocol + sslConfigs.put(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG, "SSL"); // kafka security protocol sslConfigs.put(SecurityConfigs.SSL_PROTOCOL_CONFIG, "TLSv1.2"); // protocol to create SSLContext if (mode == SSLFactory.Mode.SERVER || (mode == SSLFactory.Mode.CLIENT && keyStoreFile != null)) { -- 2.4.6 From 050782b9f47f4c61b22ef065ec4798ccbdb962d3 Mon Sep 17 00:00:00 2001 From: Sriharsha Chintalapani Date: Tue, 16 Jun 2015 08:46:05 -0700 Subject: [PATCH 14/30] KAFKA-1690. Broker side ssl changes. --- .../java/org/apache/kafka/clients/ClientUtils.java | 3 +- .../kafka/clients/consumer/ConsumerConfig.java | 35 ++-- .../kafka/clients/consumer/KafkaConsumer.java | 2 +- .../kafka/clients/producer/ProducerConfig.java | 32 ++-- .../org/apache/kafka/common/config/SSLConfigs.java | 97 ++++++++++ .../kafka/common/config/SecurityConfigs.java | 102 ----------- .../kafka/common/network/ByteBufferReceive.java | 5 +- .../kafka/common/network/ByteBufferSend.java | 12 +- .../org/apache/kafka/common/network/Channel.java | 16 +- .../kafka/common/network/ChannelBuilder.java | 6 +- .../kafka/common/network/NetworkReceive.java | 14 +- .../common/network/PlainTextChannelBuilder.java | 6 +- .../common/network/PlainTextTransportLayer.java | 4 - .../kafka/common/network/SSLChannelBuilder.java | 13 +- .../apache/kafka/common/network/SSLFactory.java | 52 +++--- .../kafka/common/network/SSLTransportLayer.java | 6 +- .../org/apache/kafka/common/network/Selector.java | 39 ++-- .../java/org/apache/kafka/common/network/Send.java | 2 +- .../kafka/common/network/TransportLayer.java | 33 +--- .../kafka/clients/producer/KafkaProducerTest.java | 4 +- .../kafka/common/network/SSLSelectorTest.java | 128 ++++++------- .../apache/kafka/common/network/SelectorTest.java | 10 +- .../java/org/apache/kafka/test/TestSSLUtils.java | 25 ++- core/src/main/scala/kafka/cluster/EndPoint.scala | 2 +- .../main/scala/kafka/network/SocketServer.scala | 111 ++++++----- core/src/main/scala/kafka/server/KafkaConfig.scala | 153 +++++++++++++++- core/src/main/scala/kafka/server/KafkaServer.scala | 3 +- .../integration/UncleanLeaderElectionTest.scala | 6 +- .../unit/kafka/network/SocketServerTest.scala | 202 --------------------- .../kafka/server/KafkaConfigConfigDefTest.scala | 35 ++++ 30 files changed, 567 insertions(+), 591 deletions(-) create mode 100644 clients/src/main/java/org/apache/kafka/common/config/SSLConfigs.java delete mode 100644 clients/src/main/java/org/apache/kafka/common/config/SecurityConfigs.java delete mode 100644 core/src/test/scala/unit/kafka/network/SocketServerTest.scala diff --git a/clients/src/main/java/org/apache/kafka/clients/ClientUtils.java 
b/clients/src/main/java/org/apache/kafka/clients/ClientUtils.java index 782d182..d70ad33 100644 --- a/clients/src/main/java/org/apache/kafka/clients/ClientUtils.java +++ b/clients/src/main/java/org/apache/kafka/clients/ClientUtils.java @@ -23,6 +23,7 @@ import org.apache.kafka.common.protocol.SecurityProtocol; import org.apache.kafka.common.network.ChannelBuilder; import org.apache.kafka.common.network.SSLChannelBuilder; import org.apache.kafka.common.network.PlainTextChannelBuilder; +import org.apache.kafka.common.network.SSLFactory; import org.apache.kafka.common.config.ConfigException; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -77,7 +78,7 @@ public class ClientUtils { SecurityProtocol securityProtocol = SecurityProtocol.valueOf((String) configs.get(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG)); if (securityProtocol == SecurityProtocol.SSL) { - channelBuilder = new SSLChannelBuilder(); + channelBuilder = new SSLChannelBuilder(SSLFactory.Mode.CLIENT); } else { channelBuilder = new PlainTextChannelBuilder(); } diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/ConsumerConfig.java b/clients/src/main/java/org/apache/kafka/clients/consumer/ConsumerConfig.java index 3027674..a5de3ea 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/ConsumerConfig.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/ConsumerConfig.java @@ -19,7 +19,7 @@ import org.apache.kafka.common.config.ConfigDef; import org.apache.kafka.common.config.ConfigDef.Importance; import org.apache.kafka.common.config.ConfigDef.Type; import org.apache.kafka.common.serialization.Deserializer; -import org.apache.kafka.common.config.SecurityConfigs; +import org.apache.kafka.common.config.SSLConfigs; import java.util.HashMap; import java.util.Map; @@ -160,9 +160,6 @@ public class ConsumerConfig extends AbstractConfig { /** connections.max.idle.ms */ public static final String CONNECTIONS_MAX_IDLE_MS_CONFIG = CommonClientConfigs.CONNECTIONS_MAX_IDLE_MS_CONFIG; - /** connections.max.idle.ms */ - public static final String CONNECTIONS_MAX_IDLE_MS_CONFIG = CommonClientConfigs.CONNECTIONS_MAX_IDLE_MS_CONFIG; - static { CONFIG = new ConfigDef().define(BOOTSTRAP_SERVERS_CONFIG, @@ -286,21 +283,21 @@ public class ConsumerConfig extends AbstractConfig { Importance.HIGH, VALUE_DESERIALIZER_CLASS_DOC) .define(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG, Type.STRING, CommonClientConfigs.DEFAULT_SECURITY_PROTOCOL, Importance.MEDIUM, CommonClientConfigs.SECURITY_PROTOCOL_DOC) - .define(SecurityConfigs.PRINCIPAL_BUILDER_CLASS_CONFIG, Type.CLASS, SecurityConfigs.DEFAULT_PRINCIPAL_BUILDER_CLASS, Importance.LOW, SecurityConfigs.PRINCIPAL_BUILDER_CLASS_DOC) - .define(SecurityConfigs.SSL_PROTOCOL_CONFIG, Type.STRING, SecurityConfigs.DEFAULT_SSL_PROTOCOL, Importance.MEDIUM, SecurityConfigs.SSL_PROTOCOL_DOC) - .define(SecurityConfigs.SSL_PROVIDER_CONFIG, Type.STRING, Importance.MEDIUM, SecurityConfigs.SSL_PROVIDER_DOC, false) - .define(SecurityConfigs.SSL_CIPHER_SUITES_CONFIG, Type.LIST, Importance.LOW, SecurityConfigs.SSL_CIPHER_SUITES_DOC, false) - .define(SecurityConfigs.SSL_ENABLED_PROTOCOLS_CONFIG, Type.LIST, SecurityConfigs.DEFAULT_ENABLED_PROTOCOLS, Importance.MEDIUM, SecurityConfigs.SSL_ENABLED_PROTOCOLS_DOC) - .define(SecurityConfigs.SSL_KEYSTORE_TYPE_CONFIG, Type.STRING, SecurityConfigs.DEFAULT_SSL_KEYSTORE_TYPE, Importance.MEDIUM, SecurityConfigs.SSL_KEYSTORE_TYPE_DOC) - .define(SecurityConfigs.SSL_KEYSTORE_LOCATION_CONFIG, Type.STRING, Importance.HIGH, 
SecurityConfigs.SSL_KEYSTORE_LOCATION_DOC, false) - .define(SecurityConfigs.SSL_KEYSTORE_PASSWORD_CONFIG, Type.STRING, Importance.HIGH, SecurityConfigs.SSL_KEYSTORE_PASSWORD_DOC, false) - .define(SecurityConfigs.SSL_KEY_PASSWORD_CONFIG, Type.STRING, Importance.HIGH, SecurityConfigs.SSL_KEY_PASSWORD_DOC, false) - .define(SecurityConfigs.SSL_TRUSTSTORE_TYPE_CONFIG, Type.STRING, SecurityConfigs.DEFAULT_SSL_TRUSTSTORE_TYPE, Importance.MEDIUM, SecurityConfigs.SSL_TRUSTSTORE_TYPE_DOC) - .define(SecurityConfigs.SSL_TRUSTSTORE_LOCATION_CONFIG, Type.STRING, SecurityConfigs.DEFAULT_TRUSTSTORE_LOCATION, Importance.HIGH, SecurityConfigs.SSL_TRUSTSTORE_LOCATION_DOC) - .define(SecurityConfigs.SSL_TRUSTSTORE_PASSWORD_CONFIG, Type.STRING, SecurityConfigs.DEFAULT_TRUSTSTORE_PASSWORD, Importance.HIGH, SecurityConfigs.SSL_TRUSTSTORE_PASSWORD_DOC) - .define(SecurityConfigs.SSL_KEYMANAGER_ALGORITHM_CONFIG, Type.STRING, SecurityConfigs.DEFAULT_SSL_KEYMANGER_ALGORITHM, Importance.LOW, SecurityConfigs.SSL_KEYMANAGER_ALGORITHM_DOC) - .define(SecurityConfigs.SSL_TRUSTMANAGER_ALGORITHM_CONFIG, Type.STRING, SecurityConfigs.DEFAULT_SSL_TRUSTMANAGER_ALGORITHM, Importance.LOW, SecurityConfigs.SSL_TRUSTMANAGER_ALGORITHM_DOC) - .define(SecurityConfigs.SSL_ENDPOINT_IDENTIFICATION_ALGORITHM_CONFIG, Type.STRING, Importance.LOW, SecurityConfigs.SSL_ENDPOINT_IDENTIFICATION_ALGORITHM_DOC, false) + .define(SSLConfigs.PRINCIPAL_BUILDER_CLASS_CONFIG, Type.CLASS, SSLConfigs.DEFAULT_PRINCIPAL_BUILDER_CLASS, Importance.LOW, SSLConfigs.PRINCIPAL_BUILDER_CLASS_DOC) + .define(SSLConfigs.SSL_PROTOCOL_CONFIG, Type.STRING, SSLConfigs.DEFAULT_SSL_PROTOCOL, Importance.MEDIUM, SSLConfigs.SSL_PROTOCOL_DOC) + .define(SSLConfigs.SSL_PROVIDER_CONFIG, Type.STRING, Importance.MEDIUM, SSLConfigs.SSL_PROVIDER_DOC, false) + .define(SSLConfigs.SSL_CIPHER_SUITES_CONFIG, Type.LIST, Importance.LOW, SSLConfigs.SSL_CIPHER_SUITES_DOC, false) + .define(SSLConfigs.SSL_ENABLED_PROTOCOLS_CONFIG, Type.LIST, SSLConfigs.DEFAULT_ENABLED_PROTOCOLS, Importance.MEDIUM, SSLConfigs.SSL_ENABLED_PROTOCOLS_DOC) + .define(SSLConfigs.SSL_KEYSTORE_TYPE_CONFIG, Type.STRING, SSLConfigs.DEFAULT_SSL_KEYSTORE_TYPE, Importance.MEDIUM, SSLConfigs.SSL_KEYSTORE_TYPE_DOC) + .define(SSLConfigs.SSL_KEYSTORE_LOCATION_CONFIG, Type.STRING, Importance.HIGH, SSLConfigs.SSL_KEYSTORE_LOCATION_DOC, false) + .define(SSLConfigs.SSL_KEYSTORE_PASSWORD_CONFIG, Type.STRING, Importance.HIGH, SSLConfigs.SSL_KEYSTORE_PASSWORD_DOC, false) + .define(SSLConfigs.SSL_KEY_PASSWORD_CONFIG, Type.STRING, Importance.HIGH, SSLConfigs.SSL_KEY_PASSWORD_DOC, false) + .define(SSLConfigs.SSL_TRUSTSTORE_TYPE_CONFIG, Type.STRING, SSLConfigs.DEFAULT_SSL_TRUSTSTORE_TYPE, Importance.MEDIUM, SSLConfigs.SSL_TRUSTSTORE_TYPE_DOC) + .define(SSLConfigs.SSL_TRUSTSTORE_LOCATION_CONFIG, Type.STRING, SSLConfigs.DEFAULT_TRUSTSTORE_LOCATION, Importance.HIGH, SSLConfigs.SSL_TRUSTSTORE_LOCATION_DOC) + .define(SSLConfigs.SSL_TRUSTSTORE_PASSWORD_CONFIG, Type.STRING, SSLConfigs.DEFAULT_TRUSTSTORE_PASSWORD, Importance.HIGH, SSLConfigs.SSL_TRUSTSTORE_PASSWORD_DOC) + .define(SSLConfigs.SSL_KEYMANAGER_ALGORITHM_CONFIG, Type.STRING, SSLConfigs.DEFAULT_SSL_KEYMANGER_ALGORITHM, Importance.LOW, SSLConfigs.SSL_KEYMANAGER_ALGORITHM_DOC) + .define(SSLConfigs.SSL_TRUSTMANAGER_ALGORITHM_CONFIG, Type.STRING, SSLConfigs.DEFAULT_SSL_TRUSTMANAGER_ALGORITHM, Importance.LOW, SSLConfigs.SSL_TRUSTMANAGER_ALGORITHM_DOC) + .define(SSLConfigs.SSL_ENDPOINT_IDENTIFICATION_ALGORITHM_CONFIG, Type.STRING, Importance.LOW, 
SSLConfigs.SSL_ENDPOINT_IDENTIFICATION_ALGORITHM_DOC, false) /* default is set to be a bit lower than the server default (10 min), to avoid both client and server closing connection at same time */ .define(CONNECTIONS_MAX_IDLE_MS_CONFIG, Type.LONG, diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/KafkaConsumer.java b/clients/src/main/java/org/apache/kafka/clients/consumer/KafkaConsumer.java index 6a12ac6..9f9d954 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/KafkaConsumer.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/KafkaConsumer.java @@ -474,7 +474,7 @@ public class KafkaConsumer implements Consumer { metricsTags.put("client-id", clientId); ChannelBuilder channelBuilder = ClientUtils.createChannelBuilder(config.values()); this.client = new NetworkClient( - new Selector(config.getLong(ConsumerConfig.CONNECTIONS_MAX_IDLE_MS_CONFIG), metrics, time, metricGrpPrefix, metricsTags, ChannelBuilder), + new Selector(config.getLong(ConsumerConfig.CONNECTIONS_MAX_IDLE_MS_CONFIG), metrics, time, metricGrpPrefix, metricsTags, channelBuilder), this.metadata, clientId, 100, // a fixed large enough value will suffice diff --git a/clients/src/main/java/org/apache/kafka/clients/producer/ProducerConfig.java b/clients/src/main/java/org/apache/kafka/clients/producer/ProducerConfig.java index 1bae0ca..06f00a9 100644 --- a/clients/src/main/java/org/apache/kafka/clients/producer/ProducerConfig.java +++ b/clients/src/main/java/org/apache/kafka/clients/producer/ProducerConfig.java @@ -22,7 +22,7 @@ import java.util.Properties; import org.apache.kafka.clients.CommonClientConfigs; import org.apache.kafka.common.config.AbstractConfig; -import org.apache.kafka.common.config.SecurityConfigs; +import org.apache.kafka.common.config.SSLConfigs; import org.apache.kafka.common.config.ConfigDef; import org.apache.kafka.common.config.ConfigDef.Importance; import org.apache.kafka.common.config.ConfigDef.Type; @@ -227,21 +227,21 @@ public class ProducerConfig extends AbstractConfig { .define(KEY_SERIALIZER_CLASS_CONFIG, Type.CLASS, Importance.HIGH, KEY_SERIALIZER_CLASS_DOC) .define(VALUE_SERIALIZER_CLASS_CONFIG, Type.CLASS, Importance.HIGH, VALUE_SERIALIZER_CLASS_DOC) .define(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG, Type.STRING, CommonClientConfigs.DEFAULT_SECURITY_PROTOCOL, Importance.MEDIUM, CommonClientConfigs.SECURITY_PROTOCOL_DOC) - .define(SecurityConfigs.PRINCIPAL_BUILDER_CLASS_CONFIG, Type.CLASS, SecurityConfigs.DEFAULT_PRINCIPAL_BUILDER_CLASS, Importance.LOW, SecurityConfigs.PRINCIPAL_BUILDER_CLASS_DOC) - .define(SecurityConfigs.SSL_PROTOCOL_CONFIG, Type.STRING, SecurityConfigs.DEFAULT_SSL_PROTOCOL, Importance.MEDIUM, SecurityConfigs.SSL_PROTOCOL_DOC) - .define(SecurityConfigs.SSL_PROVIDER_CONFIG, Type.STRING, Importance.MEDIUM, SecurityConfigs.SSL_PROVIDER_DOC, false) - .define(SecurityConfigs.SSL_CIPHER_SUITES_CONFIG, Type.LIST, Importance.LOW, SecurityConfigs.SSL_CIPHER_SUITES_DOC, false) - .define(SecurityConfigs.SSL_ENABLED_PROTOCOLS_CONFIG, Type.LIST, SecurityConfigs.DEFAULT_ENABLED_PROTOCOLS, Importance.MEDIUM, SecurityConfigs.SSL_ENABLED_PROTOCOLS_DOC) - .define(SecurityConfigs.SSL_KEYSTORE_TYPE_CONFIG, Type.STRING, SecurityConfigs.DEFAULT_SSL_KEYSTORE_TYPE, Importance.MEDIUM, SecurityConfigs.SSL_KEYSTORE_TYPE_DOC) - .define(SecurityConfigs.SSL_KEYSTORE_LOCATION_CONFIG, Type.STRING, Importance.HIGH, SecurityConfigs.SSL_KEYSTORE_LOCATION_DOC, false) - .define(SecurityConfigs.SSL_KEYSTORE_PASSWORD_CONFIG, Type.STRING, Importance.HIGH, 
SecurityConfigs.SSL_KEYSTORE_PASSWORD_DOC, false) - .define(SecurityConfigs.SSL_KEY_PASSWORD_CONFIG, Type.STRING, Importance.HIGH, SecurityConfigs.SSL_KEY_PASSWORD_DOC, false) - .define(SecurityConfigs.SSL_TRUSTSTORE_TYPE_CONFIG, Type.STRING, SecurityConfigs.DEFAULT_SSL_TRUSTSTORE_TYPE, Importance.MEDIUM, SecurityConfigs.SSL_TRUSTSTORE_TYPE_DOC) - .define(SecurityConfigs.SSL_TRUSTSTORE_LOCATION_CONFIG, Type.STRING, SecurityConfigs.DEFAULT_TRUSTSTORE_LOCATION, Importance.HIGH, SecurityConfigs.SSL_TRUSTSTORE_LOCATION_DOC) - .define(SecurityConfigs.SSL_TRUSTSTORE_PASSWORD_CONFIG, Type.STRING, SecurityConfigs.DEFAULT_TRUSTSTORE_PASSWORD, Importance.HIGH, SecurityConfigs.SSL_TRUSTSTORE_PASSWORD_DOC) - .define(SecurityConfigs.SSL_KEYMANAGER_ALGORITHM_CONFIG, Type.STRING, SecurityConfigs.DEFAULT_SSL_KEYMANGER_ALGORITHM, Importance.LOW, SecurityConfigs.SSL_KEYMANAGER_ALGORITHM_DOC) - .define(SecurityConfigs.SSL_TRUSTMANAGER_ALGORITHM_CONFIG, Type.STRING, SecurityConfigs.DEFAULT_SSL_TRUSTMANAGER_ALGORITHM, Importance.LOW, SecurityConfigs.SSL_TRUSTMANAGER_ALGORITHM_DOC) - .define(SecurityConfigs.SSL_ENDPOINT_IDENTIFICATION_ALGORITHM_CONFIG, Type.STRING, Importance.LOW, SecurityConfigs.SSL_ENDPOINT_IDENTIFICATION_ALGORITHM_DOC, false); + .define(SSLConfigs.PRINCIPAL_BUILDER_CLASS_CONFIG, Type.CLASS, SSLConfigs.DEFAULT_PRINCIPAL_BUILDER_CLASS, Importance.LOW, SSLConfigs.PRINCIPAL_BUILDER_CLASS_DOC) + .define(SSLConfigs.SSL_PROTOCOL_CONFIG, Type.STRING, SSLConfigs.DEFAULT_SSL_PROTOCOL, Importance.MEDIUM, SSLConfigs.SSL_PROTOCOL_DOC) + .define(SSLConfigs.SSL_PROVIDER_CONFIG, Type.STRING, Importance.MEDIUM, SSLConfigs.SSL_PROVIDER_DOC, false) + .define(SSLConfigs.SSL_CIPHER_SUITES_CONFIG, Type.LIST, Importance.LOW, SSLConfigs.SSL_CIPHER_SUITES_DOC, false) + .define(SSLConfigs.SSL_ENABLED_PROTOCOLS_CONFIG, Type.LIST, SSLConfigs.DEFAULT_ENABLED_PROTOCOLS, Importance.MEDIUM, SSLConfigs.SSL_ENABLED_PROTOCOLS_DOC) + .define(SSLConfigs.SSL_KEYSTORE_TYPE_CONFIG, Type.STRING, SSLConfigs.DEFAULT_SSL_KEYSTORE_TYPE, Importance.MEDIUM, SSLConfigs.SSL_KEYSTORE_TYPE_DOC) + .define(SSLConfigs.SSL_KEYSTORE_LOCATION_CONFIG, Type.STRING, Importance.HIGH, SSLConfigs.SSL_KEYSTORE_LOCATION_DOC, false) + .define(SSLConfigs.SSL_KEYSTORE_PASSWORD_CONFIG, Type.STRING, Importance.HIGH, SSLConfigs.SSL_KEYSTORE_PASSWORD_DOC, false) + .define(SSLConfigs.SSL_KEY_PASSWORD_CONFIG, Type.STRING, Importance.HIGH, SSLConfigs.SSL_KEY_PASSWORD_DOC, false) + .define(SSLConfigs.SSL_TRUSTSTORE_TYPE_CONFIG, Type.STRING, SSLConfigs.DEFAULT_SSL_TRUSTSTORE_TYPE, Importance.MEDIUM, SSLConfigs.SSL_TRUSTSTORE_TYPE_DOC) + .define(SSLConfigs.SSL_TRUSTSTORE_LOCATION_CONFIG, Type.STRING, SSLConfigs.DEFAULT_TRUSTSTORE_LOCATION, Importance.HIGH, SSLConfigs.SSL_TRUSTSTORE_LOCATION_DOC) + .define(SSLConfigs.SSL_TRUSTSTORE_PASSWORD_CONFIG, Type.STRING, SSLConfigs.DEFAULT_TRUSTSTORE_PASSWORD, Importance.HIGH, SSLConfigs.SSL_TRUSTSTORE_PASSWORD_DOC) + .define(SSLConfigs.SSL_KEYMANAGER_ALGORITHM_CONFIG, Type.STRING, SSLConfigs.DEFAULT_SSL_KEYMANGER_ALGORITHM, Importance.LOW, SSLConfigs.SSL_KEYMANAGER_ALGORITHM_DOC) + .define(SSLConfigs.SSL_TRUSTMANAGER_ALGORITHM_CONFIG, Type.STRING, SSLConfigs.DEFAULT_SSL_TRUSTMANAGER_ALGORITHM, Importance.LOW, SSLConfigs.SSL_TRUSTMANAGER_ALGORITHM_DOC) + .define(SSLConfigs.SSL_ENDPOINT_IDENTIFICATION_ALGORITHM_CONFIG, Type.STRING, Importance.LOW, SSLConfigs.SSL_ENDPOINT_IDENTIFICATION_ALGORITHM_DOC, false) /* default is set to be a bit lower than the server default (10 min), to avoid both client and server closing connection at 
same time */ .define(CONNECTIONS_MAX_IDLE_MS_CONFIG, Type.LONG, 9 * 60 * 1000, Importance.MEDIUM, CommonClientConfigs.CONNECTIONS_MAX_IDLE_MS_DOC) .define(PARTITIONER_CLASS_CONFIG, Type.CLASS, "org.apache.kafka.clients.producer.internals.DefaultPartitioner", Importance.MEDIUM, PARTITIONER_CLASS_DOC); diff --git a/clients/src/main/java/org/apache/kafka/common/config/SSLConfigs.java b/clients/src/main/java/org/apache/kafka/common/config/SSLConfigs.java new file mode 100644 index 0000000..e861358 --- /dev/null +++ b/clients/src/main/java/org/apache/kafka/common/config/SSLConfigs.java @@ -0,0 +1,97 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE + * file distributed with this work for additional information regarding copyright ownership. The ASF licenses this file + * to You under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the + * License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on + * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + */ + +package org.apache.kafka.common.config; + +import javax.net.ssl.TrustManagerFactory; +import javax.net.ssl.KeyManagerFactory; + +public class SSLConfigs { + /* + * NOTE: DO NOT CHANGE EITHER CONFIG NAMES AS THESE ARE PART OF THE PUBLIC API AND CHANGE WILL BREAK USER CODE. + */ + + public static final String PRINCIPAL_BUILDER_CLASS_CONFIG = "principal.builder.class"; + public static final String PRINCIPAL_BUILDER_CLASS_DOC = "principal builder to generate a java Principal. This config is optional for client."; + public static final String DEFAULT_PRINCIPAL_BUILDER_CLASS = "org.apache.kafka.common.security.auth.DefaultPrincipalBuilder"; + + public static final String SSL_PROTOCOL_CONFIG = "ssl.protocol"; + public static final String SSL_PROTOCOL_DOC = "The ssl protocol used to generate SSLContext." + + "Default setting is TLS. Allowed values are SSL, SSLv2, SSLv3, TLS, TLSv1.1, TLSv1.2"; + public static final String DEFAULT_SSL_PROTOCOL = "TLS"; + + public static final String SSL_PROVIDER_CONFIG = "ssl.provider"; + public static final String SSL_PROVIDER_DOC = "The name of the security provider used for SSL connections. Default value is the default security provider of the JVM."; + + public static final String SSL_CIPHER_SUITES_CONFIG = "ssl.cipher.suites"; + public static final String SSL_CIPHER_SUITES_DOC = "A cipher suite is a named combination of authentication, encryption, MAC and key exchange algorithm used to negotiate the security settings for a network connection using TLS or SSL network protocol." + + "By default all the available cipher suites are supported."; + + public static final String SSL_ENABLED_PROTOCOLS_CONFIG = "ssl.enabled.protocols"; + public static final String SSL_ENABLED_PROTOCOLS_DOC = "The list of protocols enabled for SSL connections. " + + "All versions of TLS is enabled by default."; + public static final String DEFAULT_ENABLED_PROTOCOLS = "TLSv1.2,TLSv1.1,TLSv1"; + + public static final String SSL_KEYSTORE_TYPE_CONFIG = "ssl.keystore.type"; + public static final String SSL_KEYSTORE_TYPE_DOC = "The file format of the key store file. " + + "This is optional for client. 
Default value is JKS"; + public static final String DEFAULT_SSL_KEYSTORE_TYPE = "JKS"; + + public static final String SSL_KEYSTORE_LOCATION_CONFIG = "ssl.keystore.location"; + public static final String SSL_KEYSTORE_LOCATION_DOC = "The location of the key store file. " + + "This is optional for Client and can be used for two-way authentication for client."; + + public static final String SSL_KEYSTORE_PASSWORD_CONFIG = "ssl.keystore.password"; + public static final String SSL_KEYSTORE_PASSWORD_DOC = "The store password for the key store file." + + "This is optional for client and only needed if the ssl.keystore.location configured. "; + + public static final String SSL_KEY_PASSWORD_CONFIG = "ssl.key.password"; + public static final String SSL_KEY_PASSWORD_DOC = "The password of the private key in the key store file. " + + "This is optional for client."; + + public static final String SSL_TRUSTSTORE_TYPE_CONFIG = "ssl.truststore.type"; + public static final String SSL_TRUSTSTORE_TYPE_DOC = "The file format of the trust store file. " + + "Default value is JKS."; + public static final String DEFAULT_SSL_TRUSTSTORE_TYPE = "JKS"; + + public static final String SSL_TRUSTSTORE_LOCATION_CONFIG = "ssl.truststore.location"; + public static final String SSL_TRUSTSTORE_LOCATION_DOC = "The location of the trust store file. "; + public static final String DEFAULT_TRUSTSTORE_LOCATION = "/tmp/ssl.truststore.jks"; + + public static final String SSL_TRUSTSTORE_PASSWORD_CONFIG = "ssl.truststore.password"; + public static final String SSL_TRUSTSTORE_PASSWORD_DOC = "The password for the trust store file. "; + public static final String DEFAULT_TRUSTSTORE_PASSWORD = "truststore_password"; + + public static final String SSL_KEYMANAGER_ALGORITHM_CONFIG = "ssl.keymanager.algorithm"; + public static final String SSL_KEYMANAGER_ALGORITHM_DOC = "The algorithm used by key manager factory for SSL connections. " + + "Default value is the key manager factory algorithm configured for the Java Virtual Machine."; + public static final String DEFAULT_SSL_KEYMANGER_ALGORITHM = KeyManagerFactory.getDefaultAlgorithm(); + + public static final String SSL_TRUSTMANAGER_ALGORITHM_CONFIG = "ssl.trustmanager.algorithm"; + public static final String SSL_TRUSTMANAGER_ALGORITHM_DOC = "The algorithm used by trust manager factory for SSL connections. " + + "Default value is the trust manager factory algorithm configured for the Java Virtual Machine."; + public static final String DEFAULT_SSL_TRUSTMANAGER_ALGORITHM = TrustManagerFactory.getDefaultAlgorithm(); + + public static final String SSL_ENDPOINT_IDENTIFICATION_ALGORITHM_CONFIG = "ssl.endpoint.identification.algorithm"; + public static final String SSL_ENDPOINT_IDENTIFICATION_ALGORITHM_DOC = "The endpoint identification algorithm to validate server hostname using server certificate. "; + + public static final String SSL_NEED_CLIENT_AUTH_CONFIG = "ssl.need.client.auth"; + public static final String SSL_NEED_CLIENT_AUTH_DOC = "If set to true kafka broker requires all the ssl client connecting to provide client authentication. " + + "Default value is false"; + public static final Boolean DEFAULT_SSL_NEED_CLIENT_AUTH = false; + + public static final String SSL_WANT_CLIENT_AUTH_CONFIG = "ssl.want.client.auth"; + public static final String SSL_WANT_CLIENT_AUTH_DOC = "If set to true kafka broker requests for client authentication. 
Clients without any certificates can still be able to connect using SSL."; + public static final Boolean DEFAULT_SSL_WANT_CLIENT_AUTH = false; + +} diff --git a/clients/src/main/java/org/apache/kafka/common/config/SecurityConfigs.java b/clients/src/main/java/org/apache/kafka/common/config/SecurityConfigs.java deleted file mode 100644 index c2f2181..0000000 --- a/clients/src/main/java/org/apache/kafka/common/config/SecurityConfigs.java +++ /dev/null @@ -1,102 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE - * file distributed with this work for additional information regarding copyright ownership. The ASF licenses this file - * to You under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the - * License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on - * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the - * specific language governing permissions and limitations under the License. - */ - -package org.apache.kafka.common.config; - -import javax.net.ssl.TrustManagerFactory; -import javax.net.ssl.KeyManagerFactory; - -public class SecurityConfigs { - /* - * NOTE: DO NOT CHANGE EITHER CONFIG NAMES AS THESE ARE PART OF THE PUBLIC API AND CHANGE WILL BREAK USER CODE. - */ - - public static final String PRINCIPAL_BUILDER_CLASS_CONFIG = "principal.builder.class"; - public static final String PRINCIPAL_BUILDER_CLASS_DOC = "principal builder to generate a java Principal. This config is optional for client."; - public static final String DEFAULT_PRINCIPAL_BUILDER_CLASS = "org.apache.kafka.common.security.auth.DefaultPrincipalBuilder"; - - public static final String SSL_PROTOCOL_CONFIG = "ssl.protocol"; - public static final String SSL_PROTOCOL_DOC = "The ssl protocol used to generate SSLContext." - + "Default setting is TLS. Allowed values are SSL, SSLv2, SSLv3, TLS, TLSv1.1, TLSv1.2"; - public static final String DEFAULT_SSL_PROTOCOL = "TLS"; - - public static final String SSL_PROVIDER_CONFIG = "ssl.provider"; - public static final String SSL_PROVIDER_DOC = "The name of the security provider used for SSL connections. Default value is the default security provider of the JVM."; - - public static final String SSL_CIPHER_SUITES_CONFIG = "ssl.cipher.suites"; - public static final String SSL_CIPHER_SUITES_DOC = "A cipher suite is a named combination of authentication, encryption, MAC and key exchange algorithm used to negotiate the security settings for a network connection using TLS or SSL network protocol." - + "By default all the available cipher suites are supported."; - - public static final String SSL_ENABLED_PROTOCOLS_CONFIG = "ssl.enabled.protocols"; - public static final String SSL_ENABLED_PROTOCOLS_DOC = "The list of protocols enabled for SSL connections. " - + "All versions of TLS is enabled by default."; - public static final String DEFAULT_ENABLED_PROTOCOLS = "TLSv1.2,TLSv1.1,TLSv1"; - - public static final String SSL_KEYSTORE_TYPE_CONFIG = "ssl.keystore.type"; - public static final String SSL_KEYSTORE_TYPE_DOC = "The file format of the key store file. " - + "This is optional for client. 
Default value is JKS"; - public static final String DEFAULT_SSL_KEYSTORE_TYPE = "JKS"; - - public static final String SSL_KEYSTORE_LOCATION_CONFIG = "ssl.keystore.location"; - public static final String SSL_KEYSTORE_LOCATION_DOC = "The location of the key store file. " - + "This is optional for Client and can be used for two-way authentication for client."; - - public static final String SSL_KEYSTORE_PASSWORD_CONFIG = "ssl.keystore.password"; - public static final String SSL_KEYSTORE_PASSWORD_DOC = "The store password for the key store file." - + "This is optional for client and only needed if the ssl.keystore.location configured. "; - - public static final String SSL_KEY_PASSWORD_CONFIG = "ssl.key.password"; - public static final String SSL_KEY_PASSWORD_DOC = "The password of the private key in the key store file. " - + "This is optional for client."; - - public static final String SSL_TRUSTSTORE_TYPE_CONFIG = "ssl.truststore.type"; - public static final String SSL_TRUSTSTORE_TYPE_DOC = "The file format of the trust store file. " - + "Default value is JKS."; - public static final String DEFAULT_SSL_TRUSTSTORE_TYPE = "JKS"; - - public static final String SSL_TRUSTSTORE_LOCATION_CONFIG = "ssl.truststore.location"; - public static final String SSL_TRUSTSTORE_LOCATION_DOC = "The location of the trust store file. "; - public static final String DEFAULT_TRUSTSTORE_LOCATION = "/tmp/ssl.truststore.jks"; - - public static final String SSL_TRUSTSTORE_PASSWORD_CONFIG = "ssl.truststore.password"; - public static final String SSL_TRUSTSTORE_PASSWORD_DOC = "The password for the trust store file. "; - public static final String DEFAULT_TRUSTSTORE_PASSWORD = "truststore_password"; - - public static final String SSL_CLIENT_REQUIRE_CERT_CONFIG = "ssl.client.require.cert"; - public static final String SSL_CLIENT_REQUIRE_CERT_DOC = "This is to enforce two-way authentication between client and server." - + "Default value is false. If set to true client needs to provide Keystore related config"; - public static final Boolean DEFAULT_SSL_CLIENT_REQUIRE_CERT = false; - - public static final String SSL_KEYMANAGER_ALGORITHM_CONFIG = "ssl.keymanager.algorithm"; - public static final String SSL_KEYMANAGER_ALGORITHM_DOC = "The algorithm used by key manager factory for SSL connections. " - + "Default value is the key manager factory algorithm configured for the Java Virtual Machine."; - public static final String DEFAULT_SSL_KEYMANGER_ALGORITHM = KeyManagerFactory.getDefaultAlgorithm(); - - public static final String SSL_TRUSTMANAGER_ALGORITHM_CONFIG = "ssl.trustmanager.algorithm"; - public static final String SSL_TRUSTMANAGER_ALGORITHM_DOC = "The algorithm used by trust manager factory for SSL connections. " - + "Default value is the trust manager factory algorithm configured for the Java Virtual Machine."; - public static final String DEFAULT_SSL_TRUSTMANAGER_ALGORITHM = TrustManagerFactory.getDefaultAlgorithm(); - - public static final String SSL_ENDPOINT_IDENTIFICATION_ALGORITHM_CONFIG = "ssl.endpoint.identification.algorithm"; - public static final String SSL_ENDPOINT_IDENTIFICATION_ALGORITHM_DOC = "The endpoint identification algorithm to validate server hostname using server certificate. "; - - public static final String SSL_NEED_CLIENT_AUTH_CONFIG = "ssl.need.client.auth"; - public static final String SSL_NEED_CLIENT_AUTH_CONFIG_DOC = "If set to true kafka broker requires all the ssl client connecting to provide client authentication. 
" - + "Default value is false"; - public static final Boolean DEFAULT_SSL_NEED_CLIENT_AUTH = false; - - public static final String SSL_WANT_CLIENT_AUTH_CONFIG = "ssl.want.client.auth"; - public static final String SSL_WANT_CLIENT_AUTH_CONFIG_DOC = "If set to true kafka broker requests for client authentication. Clients without any certificates can still be able to connect using SSL."; - public static final Boolean DEFAULT_SSL_WANT_CLIENT_AUTH = false; - -} diff --git a/clients/src/main/java/org/apache/kafka/common/network/ByteBufferReceive.java b/clients/src/main/java/org/apache/kafka/common/network/ByteBufferReceive.java index 44cf70a..159c301 100644 --- a/clients/src/main/java/org/apache/kafka/common/network/ByteBufferReceive.java +++ b/clients/src/main/java/org/apache/kafka/common/network/ByteBufferReceive.java @@ -18,6 +18,7 @@ package org.apache.kafka.common.network; import java.io.IOException; import java.nio.ByteBuffer; +import java.nio.channels.ScatteringByteChannel; /** * A receive backed by an array of ByteBuffers @@ -47,8 +48,8 @@ public class ByteBufferReceive implements Receive { } @Override - public long readFrom(TransportLayer transportLayer) throws IOException { - long read = transportLayer.read(buffers); + public long readFrom(ScatteringByteChannel channel) throws IOException { + long read = channel.read(buffers); remaining += read; return read; } diff --git a/clients/src/main/java/org/apache/kafka/common/network/ByteBufferSend.java b/clients/src/main/java/org/apache/kafka/common/network/ByteBufferSend.java index 9ab6dc7..85babbd 100644 --- a/clients/src/main/java/org/apache/kafka/common/network/ByteBufferSend.java +++ b/clients/src/main/java/org/apache/kafka/common/network/ByteBufferSend.java @@ -15,6 +15,7 @@ package org.apache.kafka.common.network; import java.io.EOFException; import java.io.IOException; import java.nio.ByteBuffer; +import java.nio.channels.GatheringByteChannel; /** * A send backed by an array of byte buffers @@ -52,12 +53,17 @@ public class ByteBufferSend implements Send { } @Override - public long writeTo(TransportLayer transportLayer) throws IOException { - long written = transportLayer.write(buffers); + public long writeTo(GatheringByteChannel channel) throws IOException { + long written = channel.write(buffers); if (written < 0) throw new EOFException("Wrote negative bytes to channel. This shouldn't happen."); remaining -= written; - pending = transportLayer.pending(); + // This is temporary workaround. As Send , Receive interfaces are being used by BlockingChannel. + // Once BlockingChannel is removed we can make Send, Receive to work with transportLayer rather than + // GatheringByteChannel or ScatteringByteChannel. 
+ if (channel instanceof SSLTransportLayer) { + pending = ((SSLTransportLayer) channel).pending(); + } return written; } } diff --git a/clients/src/main/java/org/apache/kafka/common/network/Channel.java b/clients/src/main/java/org/apache/kafka/common/network/Channel.java index fc285b8..9f76319 100644 --- a/clients/src/main/java/org/apache/kafka/common/network/Channel.java +++ b/clients/src/main/java/org/apache/kafka/common/network/Channel.java @@ -31,13 +31,13 @@ import org.slf4j.LoggerFactory; public class Channel { private static final Logger log = LoggerFactory.getLogger(Channel.class); - private final int id; + private final String id; private TransportLayer transportLayer; private Authenticator authenticator; private NetworkReceive receive; - private NetworkSend send; + private Send send; - public Channel(int id, TransportLayer transportLayer, Authenticator authenticator) throws IOException { + public Channel(String id, TransportLayer transportLayer, Authenticator authenticator) throws IOException { this.id = id; this.transportLayer = transportLayer; this.authenticator = authenticator; @@ -78,7 +78,7 @@ public class Channel { transportLayer.finishConnect(); } - public int id() { + public String id() { return id; } @@ -104,7 +104,7 @@ public class Channel { return socket.getLocalAddress().toString(); } - public void setSend(NetworkSend send) { + public void setSend(Send send) { if (this.send != null) throw new IllegalStateException("Attempt to begin a send operation with prior send operation still in progress."); this.send = send; @@ -126,8 +126,8 @@ public class Channel { return result; } - public NetworkSend write() throws IOException { - NetworkSend result = null; + public Send write() throws IOException { + Send result = null; if (send != null && send(send)) { result = send; send = null; @@ -140,7 +140,7 @@ public class Channel { return result; } - private boolean send(NetworkSend send) throws IOException { + private boolean send(Send send) throws IOException { send.writeTo(transportLayer); if (send.completed()) { transportLayer.removeInterestOps(SelectionKey.OP_WRITE); diff --git a/clients/src/main/java/org/apache/kafka/common/network/ChannelBuilder.java b/clients/src/main/java/org/apache/kafka/common/network/ChannelBuilder.java index 8cdd0fc..a9a88db 100644 --- a/clients/src/main/java/org/apache/kafka/common/network/ChannelBuilder.java +++ b/clients/src/main/java/org/apache/kafka/common/network/ChannelBuilder.java @@ -30,10 +30,10 @@ public interface ChannelBuilder { /** * returns a Channel with TransportLayer and Authenticator configured. 
- * @param id - * @param socketChannel + * @param id channel id + * @param key SelectionKey */ - public Channel buildChannel(int id, SelectionKey key) throws KafkaException; + public Channel buildChannel(String id, SelectionKey key) throws KafkaException; /** diff --git a/clients/src/main/java/org/apache/kafka/common/network/NetworkReceive.java b/clients/src/main/java/org/apache/kafka/common/network/NetworkReceive.java index 0090e62..4fbc53c 100644 --- a/clients/src/main/java/org/apache/kafka/common/network/NetworkReceive.java +++ b/clients/src/main/java/org/apache/kafka/common/network/NetworkReceive.java @@ -67,18 +67,18 @@ public class NetworkReceive implements Receive { return !size.hasRemaining() && !buffer.hasRemaining(); } - public long readFrom(ScatteringByteChannel transportLayer) throws IOException { - return readFromReadableChannel(transportLayer); + public long readFrom(ScatteringByteChannel channel) throws IOException { + return readFromReadableChannel(channel); } // Need a method to read from ReadableByteChannel because BlockingChannel requires read with timeout // See: http://stackoverflow.com/questions/2866557/timeout-for-socketchannel-doesnt-work // This can go away after we get rid of BlockingChannel @Deprecated - public long readFromReadableChannel(ReadableByteChannel transportLayer) throws IOException { + public long readFromReadableChannel(ReadableByteChannel channel) throws IOException { int read = 0; if (size.hasRemaining()) { - int bytesRead = transportLayer.read(size); + int bytesRead = channel.read(size); if (bytesRead < 0) throw new EOFException(); read += bytesRead; @@ -93,7 +93,7 @@ public class NetworkReceive implements Receive { } } if (buffer != null) { - int bytesRead = transportLayer.read(buffer); + int bytesRead = channel.read(buffer); if (bytesRead < 0) throw new EOFException(); read += bytesRead; @@ -108,10 +108,10 @@ public class NetworkReceive implements Receive { // Used only by BlockingChannel, so we may be able to get rid of this when/if we get rid of BlockingChannel @Deprecated - public long readCompletely(ReadableByteChannel transportLayer) throws IOException { + public long readCompletely(ReadableByteChannel channel) throws IOException { int totalRead = 0; while (!complete()) { - totalRead += readFromReadableChannel(transportLayer); + totalRead += readFromReadableChannel(channel); } return totalRead; } diff --git a/clients/src/main/java/org/apache/kafka/common/network/PlainTextChannelBuilder.java b/clients/src/main/java/org/apache/kafka/common/network/PlainTextChannelBuilder.java index ecef62a..eb2cbf3 100644 --- a/clients/src/main/java/org/apache/kafka/common/network/PlainTextChannelBuilder.java +++ b/clients/src/main/java/org/apache/kafka/common/network/PlainTextChannelBuilder.java @@ -16,7 +16,7 @@ import java.nio.channels.SelectionKey; import java.util.Map; import org.apache.kafka.common.security.auth.PrincipalBuilder; -import org.apache.kafka.common.config.SecurityConfigs; +import org.apache.kafka.common.config.SSLConfigs; import org.apache.kafka.common.utils.Utils; import org.apache.kafka.common.KafkaException; @@ -30,14 +30,14 @@ public class PlainTextChannelBuilder implements ChannelBuilder { public void configure(Map configs) throws KafkaException { try { - this.principalBuilder = (PrincipalBuilder) Utils.newInstance((Class) configs.get(SecurityConfigs.PRINCIPAL_BUILDER_CLASS_CONFIG)); + this.principalBuilder = (PrincipalBuilder) Utils.newInstance((Class) configs.get(SSLConfigs.PRINCIPAL_BUILDER_CLASS_CONFIG)); 
this.principalBuilder.configure(configs); } catch (Exception e) { throw new KafkaException(e); } } - public Channel buildChannel(int id, SelectionKey key) throws KafkaException { + public Channel buildChannel(String id, SelectionKey key) throws KafkaException { Channel channel = null; try { PlainTextTransportLayer transportLayer = new PlainTextTransportLayer(key); diff --git a/clients/src/main/java/org/apache/kafka/common/network/PlainTextTransportLayer.java b/clients/src/main/java/org/apache/kafka/common/network/PlainTextTransportLayer.java index dbd35db..b7529a7 100644 --- a/clients/src/main/java/org/apache/kafka/common/network/PlainTextTransportLayer.java +++ b/clients/src/main/java/org/apache/kafka/common/network/PlainTextTransportLayer.java @@ -22,8 +22,6 @@ package org.apache.kafka.common.network; */ import java.io.IOException; -import java.io.DataInputStream; -import java.io.DataOutputStream; import java.nio.ByteBuffer; import java.nio.channels.SocketChannel; import java.nio.channels.SelectionKey; @@ -39,8 +37,6 @@ public class PlainTextTransportLayer implements TransportLayer { private static final Logger log = LoggerFactory.getLogger(PlainTextTransportLayer.class); private SelectionKey key; private SocketChannel socketChannel; - private DataInputStream inStream; - private DataOutputStream outStream; private final Principal principal = new KafkaPrincipal("ANONYMOUS"); public PlainTextTransportLayer(SelectionKey key) throws IOException { diff --git a/clients/src/main/java/org/apache/kafka/common/network/SSLChannelBuilder.java b/clients/src/main/java/org/apache/kafka/common/network/SSLChannelBuilder.java index 2d89497..8ba9f00 100644 --- a/clients/src/main/java/org/apache/kafka/common/network/SSLChannelBuilder.java +++ b/clients/src/main/java/org/apache/kafka/common/network/SSLChannelBuilder.java @@ -17,7 +17,7 @@ import java.nio.channels.SocketChannel; import java.util.Map; import org.apache.kafka.common.security.auth.PrincipalBuilder; -import org.apache.kafka.common.config.SecurityConfigs; +import org.apache.kafka.common.config.SSLConfigs; import org.apache.kafka.common.utils.Utils; import org.apache.kafka.common.KafkaException; import org.slf4j.Logger; @@ -27,19 +27,24 @@ public class SSLChannelBuilder implements ChannelBuilder { private static final Logger log = LoggerFactory.getLogger(SSLChannelBuilder.class); private SSLFactory sslFactory; private PrincipalBuilder principalBuilder; + private SSLFactory.Mode mode; + + public SSLChannelBuilder(SSLFactory.Mode mode) { + this.mode = mode; + } public void configure(Map configs) throws KafkaException { try { - this.sslFactory = new SSLFactory(SSLFactory.Mode.CLIENT); + this.sslFactory = new SSLFactory(mode); this.sslFactory.configure(configs); - this.principalBuilder = (PrincipalBuilder) Utils.newInstance((Class) configs.get(SecurityConfigs.PRINCIPAL_BUILDER_CLASS_CONFIG)); + this.principalBuilder = (PrincipalBuilder) Utils.newInstance((Class) configs.get(SSLConfigs.PRINCIPAL_BUILDER_CLASS_CONFIG)); this.principalBuilder.configure(configs); } catch (Exception e) { throw new KafkaException(e); } } - public Channel buildChannel(int id, SelectionKey key) throws KafkaException { + public Channel buildChannel(String id, SelectionKey key) throws KafkaException { Channel channel = null; try { SocketChannel socketChannel = (SocketChannel) key.channel(); diff --git a/clients/src/main/java/org/apache/kafka/common/network/SSLFactory.java b/clients/src/main/java/org/apache/kafka/common/network/SSLFactory.java index b669069..ec53b69 100644 --- 
a/clients/src/main/java/org/apache/kafka/common/network/SSLFactory.java +++ b/clients/src/main/java/org/apache/kafka/common/network/SSLFactory.java @@ -27,7 +27,7 @@ import javax.net.ssl.*; import org.apache.kafka.common.KafkaException; import org.apache.kafka.common.Configurable; -import org.apache.kafka.common.config.SecurityConfigs; +import org.apache.kafka.common.config.SSLConfigs; public class SSLFactory implements Configurable { @@ -55,44 +55,44 @@ public class SSLFactory implements Configurable { @Override public void configure(Map configs) throws KafkaException { - this.protocol = (String) configs.get(SecurityConfigs.SSL_PROTOCOL_CONFIG); - this.provider = (String) configs.get(SecurityConfigs.SSL_PROVIDER_CONFIG); + this.protocol = (String) configs.get(SSLConfigs.SSL_PROTOCOL_CONFIG); + this.provider = (String) configs.get(SSLConfigs.SSL_PROVIDER_CONFIG); - if (configs.get(SecurityConfigs.SSL_CIPHER_SUITES_CONFIG) != null) { - List cipherSuitesList = (List) configs.get(SecurityConfigs.SSL_CIPHER_SUITES_CONFIG); + if (configs.get(SSLConfigs.SSL_CIPHER_SUITES_CONFIG) != null) { + List cipherSuitesList = (List) configs.get(SSLConfigs.SSL_CIPHER_SUITES_CONFIG); this.cipherSuites = (String[]) cipherSuitesList.toArray(new String[cipherSuitesList.size()]); } - if (configs.get(SecurityConfigs.SSL_ENABLED_PROTOCOLS_CONFIG) != null) { - List enabledProtocolsList = (List) configs.get(SecurityConfigs.SSL_ENABLED_PROTOCOLS_CONFIG); + if (configs.get(SSLConfigs.SSL_ENABLED_PROTOCOLS_CONFIG) != null) { + List enabledProtocolsList = (List) configs.get(SSLConfigs.SSL_ENABLED_PROTOCOLS_CONFIG); this.enabledProtocols = (String[]) enabledProtocolsList.toArray(new String[enabledProtocolsList.size()]); } - if (configs.containsKey(SecurityConfigs.SSL_ENDPOINT_IDENTIFICATION_ALGORITHM_CONFIG)) { - this.endpointIdentification = (String) configs.get(SecurityConfigs.SSL_ENDPOINT_IDENTIFICATION_ALGORITHM_CONFIG); + if (configs.containsKey(SSLConfigs.SSL_ENDPOINT_IDENTIFICATION_ALGORITHM_CONFIG)) { + this.endpointIdentification = (String) configs.get(SSLConfigs.SSL_ENDPOINT_IDENTIFICATION_ALGORITHM_CONFIG); } - if (configs.containsKey(SecurityConfigs.SSL_NEED_CLIENT_AUTH_CONFIG)) { - this.needClientAuth = (Boolean) configs.get(SecurityConfigs.SSL_NEED_CLIENT_AUTH_CONFIG); + if (configs.containsKey(SSLConfigs.SSL_NEED_CLIENT_AUTH_CONFIG)) { + this.needClientAuth = (Boolean) configs.get(SSLConfigs.SSL_NEED_CLIENT_AUTH_CONFIG); } - if (configs.containsKey(SecurityConfigs.SSL_WANT_CLIENT_AUTH_CONFIG)) { - this.wantClientAuth = (Boolean) configs.get(SecurityConfigs.SSL_WANT_CLIENT_AUTH_CONFIG); + if (configs.containsKey(SSLConfigs.SSL_WANT_CLIENT_AUTH_CONFIG)) { + this.wantClientAuth = (Boolean) configs.get(SSLConfigs.SSL_WANT_CLIENT_AUTH_CONFIG); } - this.kmfAlgorithm = (String) configs.get(SecurityConfigs.SSL_KEYMANAGER_ALGORITHM_CONFIG); - this.tmfAlgorithm = (String) configs.get(SecurityConfigs.SSL_TRUSTMANAGER_ALGORITHM_CONFIG); + this.kmfAlgorithm = (String) configs.get(SSLConfigs.SSL_KEYMANAGER_ALGORITHM_CONFIG); + this.tmfAlgorithm = (String) configs.get(SSLConfigs.SSL_TRUSTMANAGER_ALGORITHM_CONFIG); if (checkKeyStoreConfigs(configs)) { - createKeystore((String) configs.get(SecurityConfigs.SSL_KEYSTORE_TYPE_CONFIG), - (String) configs.get(SecurityConfigs.SSL_KEYSTORE_LOCATION_CONFIG), - (String) configs.get(SecurityConfigs.SSL_KEYSTORE_PASSWORD_CONFIG), - (String) configs.get(SecurityConfigs.SSL_KEY_PASSWORD_CONFIG)); + createKeystore((String) configs.get(SSLConfigs.SSL_KEYSTORE_TYPE_CONFIG), + (String) 
configs.get(SSLConfigs.SSL_KEYSTORE_LOCATION_CONFIG), + (String) configs.get(SSLConfigs.SSL_KEYSTORE_PASSWORD_CONFIG), + (String) configs.get(SSLConfigs.SSL_KEY_PASSWORD_CONFIG)); } - createTruststore((String) configs.get(SecurityConfigs.SSL_TRUSTSTORE_TYPE_CONFIG), - (String) configs.get(SecurityConfigs.SSL_TRUSTSTORE_LOCATION_CONFIG), - (String) configs.get(SecurityConfigs.SSL_TRUSTSTORE_PASSWORD_CONFIG)); + createTruststore((String) configs.get(SSLConfigs.SSL_TRUSTSTORE_TYPE_CONFIG), + (String) configs.get(SSLConfigs.SSL_TRUSTSTORE_LOCATION_CONFIG), + (String) configs.get(SSLConfigs.SSL_TRUSTSTORE_PASSWORD_CONFIG)); try { this.sslContext = createSSLContext(); } catch (Exception e) { @@ -177,10 +177,10 @@ public class SSLFactory implements Configurable { } private boolean checkKeyStoreConfigs(Map configs) { - return configs.containsKey(SecurityConfigs.SSL_KEYSTORE_TYPE_CONFIG) && - configs.containsKey(SecurityConfigs.SSL_KEYSTORE_LOCATION_CONFIG) && - configs.containsKey(SecurityConfigs.SSL_KEYSTORE_PASSWORD_CONFIG) && - configs.containsKey(SecurityConfigs.SSL_KEY_PASSWORD_CONFIG); + return configs.containsKey(SSLConfigs.SSL_KEYSTORE_TYPE_CONFIG) && + configs.containsKey(SSLConfigs.SSL_KEYSTORE_LOCATION_CONFIG) && + configs.containsKey(SSLConfigs.SSL_KEYSTORE_PASSWORD_CONFIG) && + configs.containsKey(SSLConfigs.SSL_KEY_PASSWORD_CONFIG); } private class SecurityStore { diff --git a/clients/src/main/java/org/apache/kafka/common/network/SSLTransportLayer.java b/clients/src/main/java/org/apache/kafka/common/network/SSLTransportLayer.java index eb898b4..b9f57aa 100644 --- a/clients/src/main/java/org/apache/kafka/common/network/SSLTransportLayer.java +++ b/clients/src/main/java/org/apache/kafka/common/network/SSLTransportLayer.java @@ -183,7 +183,7 @@ public class SSLTransportLayer implements TransportLayer { return; } try { - switch(handshakeStatus) { + switch (handshakeStatus) { case NEED_TASK: handshakeStatus = runDelegatedTasks(); break; @@ -343,7 +343,7 @@ public class SSLTransportLayer implements TransportLayer { } cont = result.getStatus() == SSLEngineResult.Status.OK && handshakeStatus == HandshakeStatus.NEED_UNWRAP; - } while(cont); + } while (cont); return result; } @@ -405,7 +405,7 @@ public class SSLTransportLayer implements TransportLayer { } else if (unwrapResult.getStatus() == Status.CLOSED) { throw new EOFException(); } - } while(netReadBuffer.position() != 0); + } while (netReadBuffer.position() != 0); } return read; diff --git a/clients/src/main/java/org/apache/kafka/common/network/Selector.java b/clients/src/main/java/org/apache/kafka/common/network/Selector.java index 40da17e..4187276 100644 --- a/clients/src/main/java/org/apache/kafka/common/network/Selector.java +++ b/clients/src/main/java/org/apache/kafka/common/network/Selector.java @@ -16,7 +16,6 @@ import java.io.EOFException; import java.io.IOException; import java.net.ConnectException; import java.net.InetSocketAddress; -import java.net.Socket; import java.nio.channels.*; import java.util.*; import java.util.concurrent.TimeUnit; @@ -69,7 +68,7 @@ public class Selector implements Selectable { private static final Logger log = LoggerFactory.getLogger(Selector.class); private final java.nio.channels.Selector nioSelector; - private final Map keys; + private final Map channels; private final List completedSends; private final List completedReceives; private final List disconnected; @@ -103,7 +102,7 @@ public class Selector implements Selectable { this.metricGrpPrefix = metricGrpPrefix; this.metricTags = metricTags; 
this.channels = new HashMap(); - this.completedSends = new ArrayList(); + this.completedSends = new ArrayList(); this.completedReceives = new ArrayList(); this.connected = new ArrayList(); this.disconnected = new ArrayList(); @@ -117,8 +116,8 @@ public class Selector implements Selectable { this.metricsPerConnection = metricsPerConnection; } - public Selector(long connectionMaxIdleMS, Metrics metrics, Time time, String metricGrpPrefix, Map metricTags) { - this(NetworkReceive.UNLIMITED, connectionMaxIdleMS, metrics, time, metricGrpPrefix, metricTags, true); + public Selector(long connectionMaxIdleMS, Metrics metrics, Time time, String metricGrpPrefix, Map metricTags, ChannelBuilder channelBuilder) { + this(NetworkReceive.UNLIMITED, connectionMaxIdleMS, metrics, time, metricGrpPrefix, metricTags, true, channelBuilder); } /** @@ -165,8 +164,8 @@ public class Selector implements Selectable { * Use this on server-side, when a connection is accepted by a different thread but processed by the Selector * Note that we are not checking if the connection id is valid - since the connection already exists */ - public void register(String id, SocketChannel channel) throws ClosedChannelException { - SelectionKey key = channel.register(nioSelector, SelectionKey.OP_READ); + public void register(String id, SocketChannel socketChannel) throws ClosedChannelException { + SelectionKey key = socketChannel.register(nioSelector, SelectionKey.OP_READ); Channel channel = channelBuilder.buildChannel(id, key); key.attach(channel); this.channels.put(id, channel); @@ -278,24 +277,23 @@ public class Selector implements Selectable { if (channel.isReady() && key.isReadable()) { NetworkReceive networkReceive; try { - while ((networkReceive = channel.read()) != null) { - networkReceive.payload().rewind(); + if ((networkReceive = channel.read()) != null) { this.completedReceives.add(networkReceive); this.sensors.recordBytesReceived(channel.id(), networkReceive.payload().limit()); } } catch (InvalidReceiveException e) { - log.error("Invalid data received from " + transmissions.id + " closing connection", e); - close(transmissions.id); + log.error("Invalid data received from " + channel.id() + " closing connection", e); + close(channel.id()); throw e; } } /* if channel is ready write to any sockets that have space in their buffer and for which we have data */ if (key.isWritable() && channel.isReady()) { - NetworkSend networkSend = channel.write(); - if (networkSend != null) { - this.completedSends.add(networkSend); - this.sensors.recordBytesSent(channel.id(), networkSend.size()); + Send send = channel.write(); + if (send != null) { + this.completedSends.add(send); + this.sensors.recordBytesSent(channel.id(), send.size()); } } @@ -424,7 +422,7 @@ public class Selector implements Selectable { * @param id channel id */ public void close(String id) { - Channel channel = this.channels(id); + Channel channel = this.channels.get(id); close(channel); } @@ -445,7 +443,7 @@ public class Selector implements Selectable { * Get the channel associated with this numeric id */ private Channel channelForId(String id) { - Channel channel = channel.get(id); + Channel channel = this.channels.get(id); if (channel == null) throw new IllegalStateException("Attempt to write to socket for which there is no open connection. 
Connection id " + id + " existing connections " + channels.keySet().toString()); return channel; @@ -458,13 +456,6 @@ public class Selector implements Selectable { return (Channel) key.attachment(); } - /** - * Get the socket channel associated with this selection key - */ - private SocketChannel channel(SelectionKey key) { - return (SocketChannel) key.channel(); - } - private class SelectorMetrics { private final Metrics metrics; diff --git a/clients/src/main/java/org/apache/kafka/common/network/Send.java b/clients/src/main/java/org/apache/kafka/common/network/Send.java index b0f4cc5..e0d8831 100644 --- a/clients/src/main/java/org/apache/kafka/common/network/Send.java +++ b/clients/src/main/java/org/apache/kafka/common/network/Send.java @@ -37,7 +37,7 @@ public interface Send { * @return The number of bytes written * @throws IOException If the write fails */ - public long writeTo(Channel channel) throws IOException; + public long writeTo(GatheringByteChannel channel) throws IOException; /** * Size of the send diff --git a/clients/src/main/java/org/apache/kafka/common/network/TransportLayer.java b/clients/src/main/java/org/apache/kafka/common/network/TransportLayer.java index dd544a6..6a085c6 100644 --- a/clients/src/main/java/org/apache/kafka/common/network/TransportLayer.java +++ b/clients/src/main/java/org/apache/kafka/common/network/TransportLayer.java @@ -21,13 +21,14 @@ package org.apache.kafka.common.network; * Transport layer for underlying communication */ import java.io.IOException; -import java.nio.ByteBuffer; import java.nio.channels.SocketChannel; +import java.nio.channels.ScatteringByteChannel; +import java.nio.channels.GatheringByteChannel; import java.security.Principal; -public interface TransportLayer { +public interface TransportLayer extends ScatteringByteChannel, GatheringByteChannel { /** * Returns true if the channel has handshake and authenticaiton done. @@ -35,7 +36,7 @@ public interface TransportLayer { boolean isReady(); /** - * calls internal socketChannel.finishConnect() + * Finishes the process of connecting a socket channel. */ void finishConnect() throws IOException; @@ -49,12 +50,6 @@ public interface TransportLayer { */ SocketChannel socketChannel(); - /** - * returns true if socketchannel is open. - */ - boolean isOpen(); - - public void close() throws IOException; /** * returns true if there are any pending bytes needs to be written to channel. @@ -70,26 +65,6 @@ public interface TransportLayer { /** - * Reads sequence of bytes from the channel to given buffer - */ - public int read(ByteBuffer dst) throws IOException; - - public long read(ByteBuffer[] dsts) throws IOException; - - public long read(ByteBuffer[] dsts, int offset, int length) throws IOException; - - /** - * Writes a sequence of bytes to this channel from the given buffer. 
- */ - public int write(ByteBuffer src) throws IOException; - - public long write(ByteBuffer[] srcs) throws IOException; - - public long write(ByteBuffer[] srcs, int offset, int length) throws IOException; - - - - /** * returns SSLSession.getPeerPrinicpal if SSLTransportLayer used * for non-secure returns a "ANONYMOUS" as the peerPrincipal */ diff --git a/clients/src/test/java/org/apache/kafka/clients/producer/KafkaProducerTest.java b/clients/src/test/java/org/apache/kafka/clients/producer/KafkaProducerTest.java index a38f189..d1759ce 100644 --- a/clients/src/test/java/org/apache/kafka/clients/producer/KafkaProducerTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/producer/KafkaProducerTest.java @@ -18,7 +18,7 @@ package org.apache.kafka.clients.producer; import org.apache.kafka.common.KafkaException; import org.apache.kafka.common.serialization.ByteArraySerializer; -import org.apache.kafka.common.config.SecurityConfigs; +import org.apache.kafka.common.config.SSLConfigs; import org.apache.kafka.clients.CommonClientConfigs; import org.apache.kafka.test.MockMetricsReporter; import org.apache.kafka.test.MockSerializer; @@ -60,7 +60,7 @@ public class KafkaProducerTest { configs.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9999"); configs.put(ProducerConfig.METRIC_REPORTER_CLASSES_CONFIG, MockMetricsReporter.class.getName()); configs.put(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG, CommonClientConfigs.DEFAULT_SECURITY_PROTOCOL); - configs.put(SecurityConfigs.PRINCIPAL_BUILDER_CLASS_CONFIG, Class.forName(SecurityConfigs.DEFAULT_PRINCIPAL_BUILDER_CLASS)); + configs.put(SSLConfigs.PRINCIPAL_BUILDER_CLASS_CONFIG, Class.forName(SSLConfigs.DEFAULT_PRINCIPAL_BUILDER_CLASS)); final int oldInitCount = MockSerializer.INIT_COUNT.get(); final int oldCloseCount = MockSerializer.CLOSE_COUNT.get(); diff --git a/clients/src/test/java/org/apache/kafka/common/network/SSLSelectorTest.java b/clients/src/test/java/org/apache/kafka/common/network/SSLSelectorTest.java index a5351ce..1a8cc27 100644 --- a/clients/src/test/java/org/apache/kafka/common/network/SSLSelectorTest.java +++ b/clients/src/test/java/org/apache/kafka/common/network/SSLSelectorTest.java @@ -22,7 +22,7 @@ import java.io.IOException; import java.net.InetSocketAddress; import java.nio.ByteBuffer; -import org.apache.kafka.common.config.SecurityConfigs; +import org.apache.kafka.common.config.SSLConfigs; import org.apache.kafka.common.metrics.Metrics; import org.apache.kafka.common.utils.MockTime; import org.apache.kafka.common.utils.Utils; @@ -48,15 +48,15 @@ public class SSLSelectorTest { public void setup() throws Exception { Map> sslConfigs = TestSSLUtils.createSSLConfigs(false, true); Map sslServerConfigs = sslConfigs.get(SSLFactory.Mode.SERVER); - sslServerConfigs.put(SecurityConfigs.PRINCIPAL_BUILDER_CLASS_CONFIG, Class.forName(SecurityConfigs.DEFAULT_PRINCIPAL_BUILDER_CLASS)); + sslServerConfigs.put(SSLConfigs.PRINCIPAL_BUILDER_CLASS_CONFIG, Class.forName(SSLConfigs.DEFAULT_PRINCIPAL_BUILDER_CLASS)); this.server = new EchoServer(sslServerConfigs); this.server.start(); Map sslClientConfigs = sslConfigs.get(SSLFactory.Mode.CLIENT); - sslClientConfigs.put(SecurityConfigs.PRINCIPAL_BUILDER_CLASS_CONFIG, Class.forName(SecurityConfigs.DEFAULT_PRINCIPAL_BUILDER_CLASS)); + sslClientConfigs.put(SSLConfigs.PRINCIPAL_BUILDER_CLASS_CONFIG, Class.forName(SSLConfigs.DEFAULT_PRINCIPAL_BUILDER_CLASS)); - this.channelBuilder = new SSLChannelBuilder(); + this.channelBuilder = new SSLChannelBuilder(SSLFactory.Mode.CLIENT); 
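For illustration only, outside the patch: the test setup here relies on TestSSLUtils to build the client config map, so as a rough sketch, this is the kind of map a client-mode SSLChannelBuilder expects, spelled out with the SSLConfigs keys this patch introduces. The class name, truststore path and password are placeholders:

    import java.util.HashMap;
    import java.util.Map;

    import javax.net.ssl.TrustManagerFactory;

    import org.apache.kafka.common.config.SSLConfigs;
    import org.apache.kafka.common.network.ChannelBuilder;
    import org.apache.kafka.common.network.SSLChannelBuilder;
    import org.apache.kafka.common.network.SSLFactory;

    public class SSLClientConfigSketch {
        public static void main(String[] args) throws Exception {
            Map<String, Object> sslClientConfigs = new HashMap<String, Object>();
            sslClientConfigs.put(SSLConfigs.SSL_PROTOCOL_CONFIG, "TLSv1.2");
            sslClientConfigs.put(SSLConfigs.SSL_TRUSTSTORE_TYPE_CONFIG, "JKS");
            sslClientConfigs.put(SSLConfigs.SSL_TRUSTSTORE_LOCATION_CONFIG, "/path/to/client.truststore.jks"); // placeholder
            sslClientConfigs.put(SSLConfigs.SSL_TRUSTSTORE_PASSWORD_CONFIG, "changeit");                       // placeholder
            sslClientConfigs.put(SSLConfigs.SSL_TRUSTMANAGER_ALGORITHM_CONFIG, TrustManagerFactory.getDefaultAlgorithm());
            sslClientConfigs.put(SSLConfigs.PRINCIPAL_BUILDER_CLASS_CONFIG, Class.forName(SSLConfigs.DEFAULT_PRINCIPAL_BUILDER_CLASS));

            // Client mode only needs a truststore; keystore settings would be
            // added for two-way authentication.
            ChannelBuilder channelBuilder = new SSLChannelBuilder(SSLFactory.Mode.CLIENT);
            channelBuilder.configure(sslClientConfigs);
        }
    }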
this.channelBuilder.configure(sslClientConfigs); - this.selector = new Selector(new Metrics(), new MockTime(), "MetricGroup", new LinkedHashMap(), channelBuilder); + this.selector = new Selector(5000, new Metrics(), new MockTime(), "MetricGroup", new LinkedHashMap(), channelBuilder); } @After @@ -71,7 +71,7 @@ public class SSLSelectorTest { */ @Test public void testSendLargeRequest() throws Exception { - int node = 0; + String node = "0"; blockingConnect(node); String big = TestUtils.randomString(10 * BUFFER_SIZE); assertEquals(big, blockingRequest(node, big)); @@ -83,7 +83,7 @@ public class SSLSelectorTest { */ @Test public void testServerDisconnect() throws Exception { - int node = 0; + String node = "0"; // connect and do a simple request blockingConnect(node); assertEquals("hello", blockingRequest(node, "hello")); @@ -104,7 +104,7 @@ public class SSLSelectorTest { */ @Test public void testClientDisconnect() throws Exception { - int node = 0; + String node = "0"; blockingConnect(node); selector.disconnect(node); selector.send(createSend(node, "hello1")); @@ -123,7 +123,7 @@ public class SSLSelectorTest { @Test public void testLargeMessageSequence() throws Exception { int bufferSize = 512 * 1024; - int node = 0; + String node = "0"; int reqs = 50; InetSocketAddress addr = new InetSocketAddress("localhost", server.port); selector.connect(node, addr, BUFFER_SIZE, BUFFER_SIZE); @@ -137,7 +137,7 @@ public class SSLSelectorTest { */ @Test public void testEmptyRequest() throws Exception { - int node = 0; + String node = "0"; blockingConnect(node); assertEquals("", blockingRequest(node, "")); } @@ -145,72 +145,72 @@ public class SSLSelectorTest { @Test public void testMute() throws Exception { - blockingConnect(0); - blockingConnect(1); + blockingConnect("0"); + blockingConnect("1"); - selector.send(createSend(0, "hello")); - selector.send(createSend(1, "hi")); - selector.mute(1); + selector.send(createSend("0", "hello")); + selector.send(createSend("1", "hi")); + selector.mute("1"); while (selector.completedReceives().isEmpty()) selector.poll(5); assertEquals("We should have only one response", 1, selector.completedReceives().size()); - assertEquals("The response should not be from the muted node", 0, selector.completedReceives().get(0).source()); - selector.unmute(1); + assertEquals("The response should not be from the muted node", "0", selector.completedReceives().get(0).source()); + selector.unmute("1"); do { selector.poll(5); } while (selector.completedReceives().isEmpty()); assertEquals("We should have only one response", 1, selector.completedReceives().size()); - assertEquals("The response should be from the previously muted node", 1, selector.completedReceives().get(0).source()); + assertEquals("The response should be from the previously muted node", "1", selector.completedReceives().get(0).source()); } /** * Tests that SSL renegotiation initiated by the server are handled correctly by the client * @throws Exception */ - @Test - public void testRenegotiation() throws Exception { - int reqs = 500; - int node = 0; - // create connections - InetSocketAddress addr = new InetSocketAddress("localhost", server.port); - selector.connect(node, addr, BUFFER_SIZE, BUFFER_SIZE); - - // send echo requests and receive responses - int requests = 0; - int responses = 0; - int renegotiates = 0; - selector.send(createSend(node, node + "-" + 0)); - requests++; - - // loop until we complete all requests - while (responses < reqs) { - selector.poll(0L); - if (responses >= 100 && renegotiates == 0) { - 
renegotiates++; - server.renegotiate(); - } - assertEquals("No disconnects should have occurred.", 0, selector.disconnected().size()); - - // handle any responses we may have gotten - for (NetworkReceive receive : selector.completedReceives()) { - String[] pieces = asString(receive).split("-"); - assertEquals("Should be in the form 'conn-counter'", 2, pieces.length); - assertEquals("Check the source", receive.source(), Integer.parseInt(pieces[0])); - assertEquals("Check that the receive has kindly been rewound", 0, receive.payload().position()); - assertEquals("Check the request counter", responses, Integer.parseInt(pieces[1])); - responses++; - } - - // prepare new sends for the next round - for (int i = 0; i < selector.completedSends().size() && requests < reqs; i++, requests++) { - selector.send(createSend(node, node + "-" + requests)); - } - } - } - - - private String blockingRequest(int node, String s) throws IOException { + // @Test + // public void testRenegotiation() throws Exception { + // int reqs = 500; + // String node = "0"; + // // create connections + // InetSocketAddress addr = new InetSocketAddress("localhost", server.port); + // selector.connect(node, addr, BUFFER_SIZE, BUFFER_SIZE); + + // // send echo requests and receive responses + // int requests = 0; + // int responses = 0; + // int renegotiates = 0; + // selector.send(createSend(node, node + "-" + 0)); + // requests++; + + // // loop until we complete all requests + // while (responses < reqs) { + // selector.poll(0L); + // if (responses >= 100 && renegotiates == 0) { + // renegotiates++; + // server.renegotiate(); + // } + // assertEquals("No disconnects should have occurred.", 0, selector.disconnected().size()); + + // // handle any responses we may have gotten + // for (NetworkReceive receive : selector.completedReceives()) { + // String[] pieces = asString(receive).split("-"); + // assertEquals("Should be in the form 'conn-counter'", 2, pieces.length); + // assertEquals("Check the source", receive.source(), pieces[0]); + // assertEquals("Check that the receive has kindly been rewound", 0, receive.payload().position()); + // assertEquals("Check the request counter", responses, Integer.parseInt(pieces[1])); + // responses++; + // } + + // // prepare new sends for the next round + // for (int i = 0; i < selector.completedSends().size() && requests < reqs; i++, requests++) { + // selector.send(createSend(node, node + "-" + requests)); + // } + // } + // } + + + private String blockingRequest(String node, String s) throws IOException { selector.send(createSend(node, s)); while (true) { selector.poll(1000L); @@ -224,19 +224,19 @@ public class SSLSelectorTest { return new String(Utils.toArray(receive.payload())); } - private NetworkSend createSend(int node, String s) { + private NetworkSend createSend(String node, String s) { return new NetworkSend(node, ByteBuffer.wrap(s.getBytes())); } /* connect and wait for the connection to complete */ - private void blockingConnect(int node) throws IOException { + private void blockingConnect(String node) throws IOException { selector.connect(node, new InetSocketAddress("localhost", server.port), BUFFER_SIZE, BUFFER_SIZE); while (!selector.connected().contains(node)) selector.poll(10000L); } - private void sendAndReceive(int node, String requestPrefix, int startIndex, int endIndex) throws Exception { + private void sendAndReceive(String node, String requestPrefix, int startIndex, int endIndex) throws Exception { int requests = startIndex; int responses = startIndex; 
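For illustration only, outside the patch: the blockingConnect and blockingRequest helpers in this test reduce to a connect/send/poll cycle against the reworked String-id Selector API. A rough sketch, assuming a Selector already configured with a suitable ChannelBuilder (as in setup) and an echo server listening on the given port; the class and method names are placeholders:

    import java.io.IOException;
    import java.net.InetSocketAddress;
    import java.nio.ByteBuffer;

    import org.apache.kafka.common.network.NetworkSend;
    import org.apache.kafka.common.network.Selector;

    public class SelectorEchoSketch {
        // Connection ids are plain Strings after this patch, not ints.
        public static void echoOnce(Selector selector, String node, int port, String message) throws IOException {
            selector.connect(node, new InetSocketAddress("localhost", port), 64 * 1024, 64 * 1024);
            while (!selector.connected().contains(node))
                selector.poll(100L);

            selector.send(new NetworkSend(node, ByteBuffer.wrap(message.getBytes())));
            while (selector.completedReceives().isEmpty())
                selector.poll(100L);
        }
    }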
selector.send(createSend(node, requestPrefix + "-" + startIndex)); diff --git a/clients/src/test/java/org/apache/kafka/common/network/SelectorTest.java b/clients/src/test/java/org/apache/kafka/common/network/SelectorTest.java index 3d3db2c..3aa519e 100644 --- a/clients/src/test/java/org/apache/kafka/common/network/SelectorTest.java +++ b/clients/src/test/java/org/apache/kafka/common/network/SelectorTest.java @@ -24,7 +24,7 @@ import java.nio.ByteBuffer; import java.util.*; import org.apache.kafka.common.metrics.Metrics; -import org.apache.kafka.common.config.SecurityConfigs; +import org.apache.kafka.common.config.SSLConfigs; import org.apache.kafka.common.utils.MockTime; import org.apache.kafka.common.utils.Utils; import org.apache.kafka.test.TestUtils; @@ -46,13 +46,13 @@ public class SelectorTest { @Before public void setup() throws Exception { Map configs = new HashMap(); - configs.put(SecurityConfigs.PRINCIPAL_BUILDER_CLASS_CONFIG, Class.forName(SecurityConfigs.DEFAULT_PRINCIPAL_BUILDER_CLASS)); + configs.put(SSLConfigs.PRINCIPAL_BUILDER_CLASS_CONFIG, Class.forName(SSLConfigs.DEFAULT_PRINCIPAL_BUILDER_CLASS)); this.server = new EchoServer(configs); this.server.start(); this.channelBuilder = new PlainTextChannelBuilder(); this.channelBuilder.configure(configs); - this.selector = new Selector(5000, new Metrics(), new MockTime() , "MetricGroup", new LinkedHashMap(), ChannelBuilder); + this.selector = new Selector(5000, new Metrics(), new MockTime() , "MetricGroup", new LinkedHashMap(), channelBuilder); } @After @@ -214,7 +214,7 @@ public class SelectorTest { @Test public void testLargeMessageSequence() throws Exception { int bufferSize = 512 * 1024; - int node = 0; + String node = "0"; int reqs = 50; InetSocketAddress addr = new InetSocketAddress("localhost", server.port); selector.connect(node, addr, BUFFER_SIZE, BUFFER_SIZE); @@ -289,7 +289,7 @@ public class SelectorTest { return new String(Utils.toArray(receive.payload())); } - private void sendAndReceive(int node, String requestPrefix, int startIndex, int endIndex) throws Exception { + private void sendAndReceive(String node, String requestPrefix, int startIndex, int endIndex) throws Exception { int requests = startIndex; int responses = startIndex; selector.send(createSend(node, requestPrefix + "-" + startIndex)); diff --git a/clients/src/test/java/org/apache/kafka/test/TestSSLUtils.java b/clients/src/test/java/org/apache/kafka/test/TestSSLUtils.java index 4edbe36..1930cc2 100644 --- a/clients/src/test/java/org/apache/kafka/test/TestSSLUtils.java +++ b/clients/src/test/java/org/apache/kafka/test/TestSSLUtils.java @@ -17,7 +17,7 @@ package org.apache.kafka.test; -import org.apache.kafka.common.config.SecurityConfigs; +import org.apache.kafka.common.config.SSLConfigs; import org.apache.kafka.common.network.SSLFactory; import org.apache.kafka.clients.CommonClientConfigs; @@ -172,25 +172,24 @@ public class TestSSLUtils { File trustStoreFile, String trustStorePassword, boolean useClientCert) { Map sslConfigs = new HashMap(); sslConfigs.put(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG, "SSL"); // kafka security protocol - sslConfigs.put(SecurityConfigs.SSL_PROTOCOL_CONFIG, "TLSv1.2"); // protocol to create SSLContext + sslConfigs.put(SSLConfigs.SSL_PROTOCOL_CONFIG, "TLSv1.2"); // protocol to create SSLContext if (mode == SSLFactory.Mode.SERVER || (mode == SSLFactory.Mode.CLIENT && keyStoreFile != null)) { - sslConfigs.put(SecurityConfigs.SSL_KEYSTORE_LOCATION_CONFIG, keyStoreFile.getPath()); - 
sslConfigs.put(SecurityConfigs.SSL_KEYSTORE_TYPE_CONFIG, "JKS"); - sslConfigs.put(SecurityConfigs.SSL_KEYMANAGER_ALGORITHM_CONFIG, TrustManagerFactory.getDefaultAlgorithm()); - sslConfigs.put(SecurityConfigs.SSL_KEYSTORE_PASSWORD_CONFIG, password); - sslConfigs.put(SecurityConfigs.SSL_KEY_PASSWORD_CONFIG, keyPassword); + sslConfigs.put(SSLConfigs.SSL_KEYSTORE_LOCATION_CONFIG, keyStoreFile.getPath()); + sslConfigs.put(SSLConfigs.SSL_KEYSTORE_TYPE_CONFIG, "JKS"); + sslConfigs.put(SSLConfigs.SSL_KEYMANAGER_ALGORITHM_CONFIG, TrustManagerFactory.getDefaultAlgorithm()); + sslConfigs.put(SSLConfigs.SSL_KEYSTORE_PASSWORD_CONFIG, password); + sslConfigs.put(SSLConfigs.SSL_KEY_PASSWORD_CONFIG, keyPassword); } - sslConfigs.put(SecurityConfigs.SSL_CLIENT_REQUIRE_CERT_CONFIG, useClientCert); - sslConfigs.put(SecurityConfigs.SSL_TRUSTSTORE_LOCATION_CONFIG, trustStoreFile.getPath()); - sslConfigs.put(SecurityConfigs.SSL_TRUSTSTORE_PASSWORD_CONFIG, trustStorePassword); - sslConfigs.put(SecurityConfigs.SSL_TRUSTSTORE_TYPE_CONFIG, "JKS"); - sslConfigs.put(SecurityConfigs.SSL_TRUSTMANAGER_ALGORITHM_CONFIG, TrustManagerFactory.getDefaultAlgorithm()); + sslConfigs.put(SSLConfigs.SSL_TRUSTSTORE_LOCATION_CONFIG, trustStoreFile.getPath()); + sslConfigs.put(SSLConfigs.SSL_TRUSTSTORE_PASSWORD_CONFIG, trustStorePassword); + sslConfigs.put(SSLConfigs.SSL_TRUSTSTORE_TYPE_CONFIG, "JKS"); + sslConfigs.put(SSLConfigs.SSL_TRUSTMANAGER_ALGORITHM_CONFIG, TrustManagerFactory.getDefaultAlgorithm()); List enabledProtocols = new ArrayList(); enabledProtocols.add("TLSv1.2"); - sslConfigs.put(SecurityConfigs.SSL_ENABLED_PROTOCOLS_CONFIG, enabledProtocols); + sslConfigs.put(SSLConfigs.SSL_ENABLED_PROTOCOLS_CONFIG, enabledProtocols); return sslConfigs; } diff --git a/core/src/main/scala/kafka/cluster/EndPoint.scala b/core/src/main/scala/kafka/cluster/EndPoint.scala index e9008e6..76997b5 100644 --- a/core/src/main/scala/kafka/cluster/EndPoint.scala +++ b/core/src/main/scala/kafka/cluster/EndPoint.scala @@ -42,7 +42,7 @@ object EndPoint { * @return */ def createEndPoint(connectionString: String): EndPoint = { - val uriParseExp = """^(.*)://\[?([0-9a-z\-.:]*)\]?:(-?[0-9]+)""".r + val uriParseExp = """^(.*)://\[?([0-9a-zA-Z\-.:]*)\]?:(-?[0-9]+)""".r connectionString match { case uriParseExp(protocol, "", port) => new EndPoint(null, port.toInt, SecurityProtocol.valueOf(protocol)) case uriParseExp(protocol, host, port) => new EndPoint(host, port.toInt, SecurityProtocol.valueOf(protocol)) diff --git a/core/src/main/scala/kafka/network/SocketServer.scala b/core/src/main/scala/kafka/network/SocketServer.scala index 91319fa..6cf1a8d 100644 --- a/core/src/main/scala/kafka/network/SocketServer.scala +++ b/core/src/main/scala/kafka/network/SocketServer.scala @@ -31,7 +31,8 @@ import kafka.metrics.KafkaMetricsGroup import kafka.utils._ import org.apache.kafka.common.MetricName import org.apache.kafka.common.metrics.Metrics -import org.apache.kafka.common.network.InvalidReceiveException +import org.apache.kafka.common.network.{InvalidReceiveException, ChannelBuilder, + PlainTextChannelBuilder, SSLChannelBuilder, SSLFactory} import org.apache.kafka.common.protocol.SecurityProtocol import org.apache.kafka.common.protocol.types.SchemaException import org.apache.kafka.common.utils.{SystemTime, Time, Utils} @@ -41,7 +42,7 @@ import scala.collection._ /** * An NIO socket server. 
The threading model is * 1 Acceptor thread that handles new connections - * N Processor threads that each have their own selector and read requests from sockets + * Acceptor has N Processor threads that each have their own selector and read requests from sockets * M Handler threads that handle requests and produce responses back to the processor threads for writing. */ class SocketServer(val brokerId: Int, @@ -54,13 +55,13 @@ class SocketServer(val brokerId: Int, val maxConnectionsPerIp: Int = Int.MaxValue, val connectionsMaxIdleMs: Long, val maxConnectionsPerIpOverrides: Map[String, Int], + val channelConfigs: java.util.Map[String, Object], val time: Time, val metrics: Metrics) extends Logging with KafkaMetricsGroup { this.logIdent = "[Socket Server on Broker " + brokerId + "], " - - private val processors = new Array[Processor](numProcessorThreads) - private[network] var acceptors = mutable.Map[EndPoint,Acceptor]() val requestChannel = new RequestChannel(numProcessorThreads, maxQueuedRequests) + private[network] var acceptors = mutable.Map[EndPoint,Acceptor]() + private val allMetricNames = (0 until numProcessorThreads).map { i => val tags = new util.HashMap[String, String]() @@ -90,32 +91,15 @@ class SocketServer(val brokerId: Int, ) - this.synchronized { - for (i <- 0 until numProcessorThreads) { - processors(i) = new Processor(i, - time, - maxRequestSize, - numProcessorThreads, - requestChannel, - quotas, - connectionsMaxIdleMs, - portToProtocol, - metrics - ) - Utils.newThread("kafka-network-thread-%d-%d".format(brokerId, i), processors(i), false).start() - } - } - // register the processor threads for notification of responses - requestChannel.addResponseListener((id:Int) => processors(id).wakeup()) - // start accepting connections // right now we will use the same processors for all ports, since we didn't implement different protocols // in the future, we may implement different processors for SSL and Kerberos this.synchronized { endpoints.values.foreach(endpoint => { - val acceptor = new Acceptor(endpoint.host, endpoint.port, processors, sendBufferSize, recvBufferSize, quotas, endpoint.protocolType, portToProtocol) + val acceptor = new Acceptor(endpoint.host, endpoint.port, sendBufferSize, recvBufferSize, requestChannel, quotas, endpoint.protocolType, + portToProtocol, channelConfigs, numProcessorThreads, maxQueuedRequests, maxRequestSize, connectionsMaxIdleMs, metrics, time, brokerId) acceptors.put(endpoint, acceptor) Utils.newThread("kafka-socket-acceptor-%s-%d".format(endpoint.protocolType.toString, endpoint.port), acceptor, false).start() acceptor.awaitStartup @@ -132,7 +116,6 @@ class SocketServer(val brokerId: Int, info("Shutting down") this.synchronized { acceptors.values.foreach(_.shutdown) - processors.foreach(_.shutdown) } info("Shutdown completed") } @@ -144,8 +127,8 @@ class SocketServer(val brokerId: Int, case e: Exception => throw new KafkaException("Tried to check server's port before server was started or checked for port of non-existing protocol", e) } } -} +} /** * A base class with some helper variables and methods */ @@ -187,7 +170,7 @@ private[kafka] abstract class AbstractServerThread(connectionQuotas: ConnectionQ * Is the server still running? 
*/ protected def isRunning = alive.get - + /** * Close the given key and associated socket */ @@ -198,7 +181,7 @@ private[kafka] abstract class AbstractServerThread(connectionQuotas: ConnectionQ swallowError(key.cancel()) } } - + def close(channel: SocketChannel) { if(channel != null) { debug("Closing connection from " + channel.socket.getRemoteSocketAddress()) @@ -212,17 +195,45 @@ private[kafka] abstract class AbstractServerThread(connectionQuotas: ConnectionQ /** * Thread that accepts and configures new connections. There is only need for one of these */ -private[kafka] class Acceptor(val host: String, +private[kafka] class Acceptor(val host: String, private val port: Int, - private val processors: Array[Processor], - val sendBufferSize: Int, + val sendBufferSize: Int, val recvBufferSize: Int, + requestChannel: RequestChannel, connectionQuotas: ConnectionQuotas, protocol: SecurityProtocol, - portToProtocol: ConcurrentHashMap[Int, SecurityProtocol]) extends AbstractServerThread(connectionQuotas) { + portToProtocol: ConcurrentHashMap[Int, SecurityProtocol], + channelConfigs: java.util.Map[String, Object], + numProcessorThreads: Int, + maxQueuedRequests: Int, + maxRequestSize: Int, + connectionsMaxIdleMs: Long, + metrics: Metrics, + time: Time, + brokerId: Int) extends AbstractServerThread(connectionQuotas) { val nioSelector = java.nio.channels.Selector.open() val serverChannel = openServerSocket(host, port) + private val processors = new Array[Processor](numProcessorThreads) portToProtocol.put(serverChannel.socket().getLocalPort, protocol) + this.synchronized { + for (i <- 0 until numProcessorThreads) { + processors(i) = new Processor(i, + time, + maxRequestSize, + numProcessorThreads, + requestChannel, + connectionQuotas, + connectionsMaxIdleMs, + protocol, + channelConfigs, + metrics + ) + Utils.newThread("kafka-network-thread-%d-%s-%d".format(brokerId, protocol.name, i), processors(i), false).start() + } + } + + // register the processor threads for notification of responses + requestChannel.addResponseListener((id:Int) => processors(id).wakeup()) /** * Accept loop that checks for new connection attempts @@ -259,12 +270,12 @@ private[kafka] class Acceptor(val host: String, swallowError(nioSelector.close()) shutdownComplete() } - + /* * Create a server socket to listen for connections on. 
*/ def openServerSocket(host: String, port: Int): ServerSocketChannel = { - val socketAddress = + val socketAddress = if(host == null || host.trim.isEmpty) new InetSocketAddress(port) else @@ -276,7 +287,7 @@ private[kafka] class Acceptor(val host: String, serverChannel.socket.bind(socketAddress) info("Awaiting socket connections on %s:%d.".format(socketAddress.getHostName, serverChannel.socket.getLocalPort)) } catch { - case e: SocketException => + case e: SocketException => throw new KafkaException("Socket server failed to bind to %s:%d: %s.".format(socketAddress.getHostName, port, e.getMessage), e) } serverChannel @@ -314,6 +325,12 @@ private[kafka] class Acceptor(val host: String, @Override def wakeup = nioSelector.wakeup() + + override def shutdown() = { + processors.foreach(_.shutdown) + super.shutdown + } + } /** @@ -327,15 +344,17 @@ private[kafka] class Processor(val id: Int, val requestChannel: RequestChannel, connectionQuotas: ConnectionQuotas, val connectionsMaxIdleMs: Long, - val portToProtocol: ConcurrentHashMap[Int,SecurityProtocol], + val protocol: SecurityProtocol, + val channelConfigs: java.util.Map[String, Object], val metrics: Metrics) extends AbstractServerThread(connectionQuotas) with KafkaMetricsGroup { private val newConnections = new ConcurrentLinkedQueue[SocketChannel]() private val inflightResponses = mutable.Map[String, RequestChannel.Response]() - + private val channelBuilder = createChannelBuilder private val metricTags = new util.HashMap[String, String]() metricTags.put("networkProcessor", id.toString) + newGauge("IdlePercent", new Gauge[Double] { def value = { @@ -352,7 +371,8 @@ private[kafka] class Processor(val id: Int, time, "socket-server", metricTags, - false) + false, + channelBuilder) override def run() { startupComplete() @@ -458,6 +478,15 @@ private[kafka] class Processor(val id: Int, } } + private def createChannelBuilder():ChannelBuilder = { + var channelBuilder:ChannelBuilder = new PlainTextChannelBuilder() + if (protocol == SecurityProtocol.SSL) { + channelBuilder = new SSLChannelBuilder(SSLFactory.Mode.SERVER) + } + channelBuilder.configure(channelConfigs) + channelBuilder + } + /** * Close all open connections */ @@ -476,7 +505,7 @@ private[kafka] class Processor(val id: Int, class ConnectionQuotas(val defaultMax: Int, overrideQuotas: Map[String, Int]) { private val overrides = overrideQuotas.map(entry => (InetAddress.getByName(entry._1), entry._2)) private val counts = mutable.Map[InetAddress, Int]() - + def inc(addr: InetAddress) { counts synchronized { val count = counts.getOrElse(addr, 0) @@ -486,7 +515,7 @@ class ConnectionQuotas(val defaultMax: Int, overrideQuotas: Map[String, Int]) { throw new TooManyConnectionsException(addr, max) } } - + def dec(addr: InetAddress) { counts synchronized { val count = counts.get(addr).get @@ -496,7 +525,7 @@ class ConnectionQuotas(val defaultMax: Int, overrideQuotas: Map[String, Int]) { counts.put(addr, count - 1) } } - + } class TooManyConnectionsException(val ip: InetAddress, val count: Int) extends KafkaException("Too many connections from %s (maximum = %d)".format(ip, count)) diff --git a/core/src/main/scala/kafka/server/KafkaConfig.scala b/core/src/main/scala/kafka/server/KafkaConfig.scala index 2d75186..9d83921 100755 --- a/core/src/main/scala/kafka/server/KafkaConfig.scala +++ b/core/src/main/scala/kafka/server/KafkaConfig.scala @@ -26,9 +26,11 @@ import kafka.consumer.ConsumerConfig import kafka.message.{BrokerCompressionCodec, CompressionCodec, Message, MessageSet} import kafka.utils.CoreUtils 
import org.apache.kafka.clients.CommonClientConfigs +import org.apache.kafka.common.config.SSLConfigs import org.apache.kafka.common.config.ConfigDef import org.apache.kafka.common.metrics.MetricsReporter import org.apache.kafka.common.protocol.SecurityProtocol +import org.apache.kafka.common.security.auth.PrincipalBuilder import scala.collection.{mutable, immutable, JavaConversions, Map} object Defaults { @@ -137,6 +139,23 @@ object Defaults { val MetricNumSamples = 2 val MetricSampleWindowMs = 1000 val MetricReporterClasses = "" + + /** ********* SSL configuration ***********/ + val PrincipalBuilderClass = SSLConfigs.DEFAULT_PRINCIPAL_BUILDER_CLASS + val SSLProtocol = SSLConfigs.DEFAULT_SSL_PROTOCOL + val SSLEnabledProtocols = SSLConfigs.DEFAULT_ENABLED_PROTOCOLS + val SSLKeystoreType = SSLConfigs.DEFAULT_SSL_KEYSTORE_TYPE + val SSLKeystoreLocation = "/tmp/ssl.keystore.jks" + val SSLKeystorePassword = "keystore_password" + val SSLKeyPassword = "key_password" + val SSLTruststoreType = SSLConfigs.DEFAULT_SSL_TRUSTSTORE_TYPE + val SSLTruststoreLocation = SSLConfigs.DEFAULT_TRUSTSTORE_LOCATION + val SSLTruststorePassword = SSLConfigs.DEFAULT_TRUSTSTORE_PASSWORD + val SSLKeyManagerAlgorithm = SSLConfigs.DEFAULT_SSL_KEYMANGER_ALGORITHM + val SSLTrustManagerAlgorithm = SSLConfigs.DEFAULT_SSL_TRUSTMANAGER_ALGORITHM + val SSLNeedClientAuth = false + val SSLWantClientAuth = false + } object KafkaConfig { @@ -251,6 +270,25 @@ object KafkaConfig { val MetricNumSamplesProp: String = CommonClientConfigs.METRICS_NUM_SAMPLES_CONFIG val MetricReporterClassesProp: String = CommonClientConfigs.METRIC_REPORTER_CLASSES_CONFIG + /** ********* SSL Configuration ****************/ + val PrincipalBuilderClassProp = SSLConfigs.PRINCIPAL_BUILDER_CLASS_CONFIG + val SSLProtocolProp = SSLConfigs.SSL_PROTOCOL_CONFIG + val SSLProviderProp = SSLConfigs.SSL_PROVIDER_CONFIG + val SSLCipherSuitesProp = SSLConfigs.SSL_CIPHER_SUITES_CONFIG + val SSLEnabledProtocolsProp = SSLConfigs.SSL_ENABLED_PROTOCOLS_CONFIG + val SSLKeystoreTypeProp = SSLConfigs.SSL_KEYSTORE_TYPE_CONFIG + val SSLKeystoreLocationProp = SSLConfigs.SSL_KEYSTORE_LOCATION_CONFIG + val SSLKeystorePasswordProp = SSLConfigs.SSL_KEYSTORE_PASSWORD_CONFIG + val SSLKeyPasswordProp = SSLConfigs.SSL_KEY_PASSWORD_CONFIG + val SSLTruststoreTypeProp = SSLConfigs.SSL_TRUSTSTORE_TYPE_CONFIG + val SSLTruststoreLocationProp = SSLConfigs.SSL_TRUSTSTORE_LOCATION_CONFIG + val SSLTruststorePasswordProp = SSLConfigs.SSL_TRUSTSTORE_PASSWORD_CONFIG + val SSLKeyManagerAlgorithmProp = SSLConfigs.SSL_KEYMANAGER_ALGORITHM_CONFIG + val SSLTrustManagerAlgorithmProp = SSLConfigs.SSL_TRUSTMANAGER_ALGORITHM_CONFIG + val SSLEndpointIdentificationAlgorithmProp = SSLConfigs.SSL_ENDPOINT_IDENTIFICATION_ALGORITHM_CONFIG + val SSLNeedClientAuthProp = SSLConfigs.SSL_NEED_CLIENT_AUTH_CONFIG + val SSLWantClientAuthProp = SSLConfigs.SSL_WANT_CLIENT_AUTH_CONFIG + /* Documentation */ /** ********* Zookeeper Configuration ***********/ @@ -389,6 +427,25 @@ object KafkaConfig { val MetricNumSamplesDoc = CommonClientConfigs.METRICS_NUM_SAMPLES_DOC val MetricReporterClassesDoc = CommonClientConfigs.METRIC_REPORTER_CLASSES_DOC + /** ********* SSL Configuration ****************/ + val PrincipalBuilderClassDoc = SSLConfigs.PRINCIPAL_BUILDER_CLASS_DOC + val SSLProtocolDoc = SSLConfigs.SSL_PROTOCOL_DOC + val SSLProviderDoc = SSLConfigs.SSL_PROVIDER_DOC + val SSLCipherSuitesDoc = SSLConfigs.SSL_CIPHER_SUITES_DOC + val SSLEnabledProtocolsDoc = SSLConfigs.SSL_ENABLED_PROTOCOLS_DOC + val SSLKeystoreTypeDoc = 
SSLConfigs.SSL_KEYSTORE_TYPE_DOC + val SSLKeystoreLocationDoc = SSLConfigs.SSL_KEYSTORE_LOCATION_DOC + val SSLKeystorePasswordDoc = SSLConfigs.SSL_KEYSTORE_PASSWORD_DOC + val SSLKeyPasswordDoc = SSLConfigs.SSL_KEY_PASSWORD_DOC + val SSLTruststoreTypeDoc = SSLConfigs.SSL_TRUSTSTORE_TYPE_DOC + val SSLTruststorePasswordDoc = SSLConfigs.SSL_TRUSTSTORE_PASSWORD_DOC + val SSLTruststoreLocationDoc = SSLConfigs.SSL_TRUSTSTORE_LOCATION_DOC + val SSLKeyManagerAlgorithmDoc = SSLConfigs.SSL_KEYMANAGER_ALGORITHM_DOC + val SSLTrustManagerAlgorithmDoc = SSLConfigs.SSL_TRUSTMANAGER_ALGORITHM_DOC + val SSLEndpointIdentificationAlgorithmDoc = SSLConfigs.SSL_ENDPOINT_IDENTIFICATION_ALGORITHM_DOC + val SSLNeedClientAuthDoc = SSLConfigs.SSL_NEED_CLIENT_AUTH_DOC + val SSLWantClientAuthDoc = SSLConfigs.SSL_WANT_CLIENT_AUTH_DOC + private val configDef = { import ConfigDef.Range._ @@ -512,6 +569,23 @@ object KafkaConfig { .define(MetricNumSamplesProp, INT, Defaults.MetricNumSamples, atLeast(1), LOW, MetricNumSamplesDoc) .define(MetricSampleWindowMsProp, LONG, Defaults.MetricSampleWindowMs, atLeast(1), LOW, MetricSampleWindowMsDoc) .define(MetricReporterClassesProp, LIST, Defaults.MetricReporterClasses, LOW, MetricReporterClassesDoc) + + /** ********* SSL Configuration ****************/ + .define(PrincipalBuilderClassProp, STRING, Defaults.PrincipalBuilderClass, MEDIUM, PrincipalBuilderClassDoc) + .define(SSLProtocolProp, STRING, Defaults.SSLProtocol, MEDIUM, SSLProtocolDoc) + .define(SSLProviderProp, STRING, MEDIUM, SSLProviderDoc, false) + .define(SSLEnabledProtocolsProp, LIST, Defaults.SSLEnabledProtocols, MEDIUM, SSLEnabledProtocolsDoc) + .define(SSLKeystoreTypeProp, STRING, Defaults.SSLKeystoreType, MEDIUM, SSLKeystoreTypeDoc) + .define(SSLKeystoreLocationProp, STRING, Defaults.SSLKeystoreLocation, MEDIUM, SSLKeystoreLocationDoc) + .define(SSLKeystorePasswordProp, STRING, Defaults.SSLKeystorePassword, MEDIUM, SSLKeystorePasswordDoc) + .define(SSLKeyPasswordProp, STRING, Defaults.SSLKeyPassword, MEDIUM, SSLKeyPasswordDoc) + .define(SSLTruststoreTypeProp, STRING, Defaults.SSLTruststoreType, MEDIUM, SSLTruststoreTypeDoc) + .define(SSLTruststoreLocationProp, STRING, Defaults.SSLTruststoreLocation, MEDIUM, SSLTruststoreLocationDoc) + .define(SSLTruststorePasswordProp, STRING, Defaults.SSLTruststorePassword, MEDIUM, SSLTruststorePasswordDoc) + .define(SSLKeyManagerAlgorithmProp, STRING, Defaults.SSLKeyManagerAlgorithm, MEDIUM, SSLKeyManagerAlgorithmDoc) + .define(SSLTrustManagerAlgorithmProp, STRING, Defaults.SSLTrustManagerAlgorithm, MEDIUM, SSLTrustManagerAlgorithmDoc) + .define(SSLNeedClientAuthProp, BOOLEAN, Defaults.SSLNeedClientAuth, MEDIUM, SSLNeedClientAuthDoc) + .define(SSLWantClientAuthProp, BOOLEAN, Defaults.SSLWantClientAuth, MEDIUM, SSLWantClientAuthDoc) } def configNames() = { @@ -639,7 +713,24 @@ object KafkaConfig { compressionType = parsed.get(CompressionTypeProp).asInstanceOf[String], metricNumSamples = parsed.get(MetricNumSamplesProp).asInstanceOf[Int], metricSampleWindowMs = parsed.get(MetricSampleWindowMsProp).asInstanceOf[Long], - _metricReporterClasses = parsed.get(MetricReporterClassesProp).asInstanceOf[java.util.List[String]] + _metricReporterClasses = parsed.get(MetricReporterClassesProp).asInstanceOf[java.util.List[String]], + + /** *************** SSL configuration *****************/ + principalBuilderClass = parsed.get(PrincipalBuilderClassProp).asInstanceOf[String], + sslProtocol = parsed.get(SSLProtocolProp).asInstanceOf[String], + _sslProvider = 
Option(parsed.get(SSLProviderProp)).map(_.asInstanceOf[String]), + sslEnabledProtocols = parsed.get(SSLEnabledProtocolsProp).asInstanceOf[java.util.List[String]], + sslKeystoreType = parsed.get(SSLKeystoreTypeProp).asInstanceOf[String], + sslKeystoreLocation = parsed.get(SSLKeystoreLocationProp).asInstanceOf[String], + sslKeystorePassword = parsed.get(SSLKeystorePasswordProp).asInstanceOf[String], + sslKeyPassword = parsed.get(SSLKeyPasswordProp).asInstanceOf[String], + sslTruststoreType = parsed.get(SSLTruststoreTypeProp).asInstanceOf[String], + sslTruststoreLocation = parsed.get(SSLTruststoreLocationProp).asInstanceOf[String], + sslTruststorePassword = parsed.get(SSLTruststorePasswordProp).asInstanceOf[String], + sslKeyManagerAlgorithm = parsed.get(SSLKeyManagerAlgorithmProp).asInstanceOf[String], + sslTrustManagerAlgorithm = parsed.get(SSLTrustManagerAlgorithmProp).asInstanceOf[String], + sslNeedClientAuth = parsed.get(SSLNeedClientAuthProp).asInstanceOf[Boolean], + sslWantClientAuth = parsed.get(SSLWantClientAuthProp).asInstanceOf[Boolean] ) } @@ -791,7 +882,24 @@ class KafkaConfig (/** ********* Zookeeper Configuration ***********/ val metricSampleWindowMs: Long = Defaults.MetricSampleWindowMs, val metricNumSamples: Int = Defaults.MetricNumSamples, - private val _metricReporterClasses: java.util.List[String] = util.Arrays.asList(Defaults.MetricReporterClasses) + private val _metricReporterClasses: java.util.List[String] = util.Arrays.asList(Defaults.MetricReporterClasses), + + /** ********** SSL Configuration ************/ + val principalBuilderClass: String = Defaults.PrincipalBuilderClass, + val sslProtocol: String = Defaults.SSLProtocol, + private val _sslProvider: Option[String] = None, + val sslEnabledProtocols: java.util.List[String] = util.Arrays.asList(Defaults.SSLEnabledProtocols), + val sslKeystoreType: String = Defaults.SSLKeystoreType, + val sslKeystoreLocation: String = Defaults.SSLKeystoreLocation, + val sslKeystorePassword: String = Defaults.SSLKeystorePassword, + val sslKeyPassword: String = Defaults.SSLKeyPassword, + val sslTruststoreType: String = Defaults.SSLTruststoreType, + val sslTruststoreLocation: String = Defaults.SSLTruststoreLocation, + val sslTruststorePassword: String = Defaults.SSLTruststorePassword, + val sslKeyManagerAlgorithm: String = Defaults.SSLKeyManagerAlgorithm, + val sslTrustManagerAlgorithm: String = Defaults.SSLTrustManagerAlgorithm, + val sslNeedClientAuth: Boolean = Defaults.SSLNeedClientAuth, + val sslWantClientAuth: Boolean = Defaults.SSLWantClientAuth ) { val zkConnectionTimeoutMs: Int = _zkConnectionTimeoutMs.getOrElse(zkSessionTimeoutMs) @@ -812,6 +920,7 @@ class KafkaConfig (/** ********* Zookeeper Configuration ***********/ getMap(KafkaConfig.MaxConnectionsPerIpOverridesProp, _maxConnectionsPerIpOverrides).map { case (k, v) => (k, v.toInt)} val metricReporterClasses: java.util.List[MetricsReporter] = getMetricClasses(_metricReporterClasses) + val sslProvider = _sslProvider.getOrElse("") private def getLogRetentionTimeMillis: Long = { val millisInMinute = 60L * 1000L @@ -895,7 +1004,9 @@ class KafkaConfig (/** ********* Zookeeper Configuration ***********/ } - + private def getPrincipalBuilderClass(principalBuilderClass: String): PrincipalBuilder = { + CoreUtils.createObject[PrincipalBuilder](principalBuilderClass) + } validateValues() @@ -1041,6 +1152,42 @@ class KafkaConfig (/** ********* Zookeeper Configuration ***********/ props.put(MetricSampleWindowMsProp, metricSampleWindowMs.toString) props.put(MetricReporterClassesProp, 
JavaConversions.collectionAsScalaIterable(_metricReporterClasses).mkString(",")) + /** ********* SSL configuration ***********/ + props.put(PrincipalBuilderClassProp, principalBuilderClass) + props.put(SSLProtocolProp, sslProtocol) + props.put(SSLProviderProp, sslProvider) + props.put(SSLEnabledProtocolsProp, JavaConversions.collectionAsScalaIterable(sslEnabledProtocols).mkString(",")) + props.put(SSLKeystoreTypeProp, sslKeystoreType) + props.put(SSLKeystoreLocationProp, sslKeystoreLocation) + props.put(SSLKeystorePasswordProp, sslKeystorePassword) + props.put(SSLKeyPasswordProp, sslKeyPassword) + props.put(SSLTruststoreTypeProp, sslTruststoreType) + props.put(SSLTruststoreLocationProp, sslTruststoreLocation) + props.put(SSLTruststorePasswordProp, sslTruststorePassword) + props.put(SSLKeyManagerAlgorithmProp, sslKeyManagerAlgorithm) + props.put(SSLTrustManagerAlgorithmProp, sslTrustManagerAlgorithm) + props.put(SSLNeedClientAuthProp, sslNeedClientAuth.toString) + props.put(SSLWantClientAuthProp, sslWantClientAuth.toString) props } + + def channelConfigs: java.util.Map[String, Object] = { + val channelConfigs = new java.util.HashMap[String, Object]() + import kafka.server.KafkaConfig._ + channelConfigs.put(PrincipalBuilderClassProp, Class.forName(principalBuilderClass)) + channelConfigs.put(SSLProtocolProp, sslProtocol) + channelConfigs.put(SSLEnabledProtocolsProp, sslEnabledProtocols) + channelConfigs.put(SSLKeystoreTypeProp, sslKeystoreType) + channelConfigs.put(SSLKeystoreLocationProp, sslKeystoreLocation) + channelConfigs.put(SSLKeystorePasswordProp, sslKeystorePassword) + channelConfigs.put(SSLKeyPasswordProp, sslKeyPassword) + channelConfigs.put(SSLTruststoreTypeProp, sslTruststoreType) + channelConfigs.put(SSLTruststoreLocationProp, sslTruststoreLocation) + channelConfigs.put(SSLTruststorePasswordProp, sslTruststorePassword) + channelConfigs.put(SSLKeyManagerAlgorithmProp, sslKeyManagerAlgorithm) + channelConfigs.put(SSLTrustManagerAlgorithmProp, sslTrustManagerAlgorithm) + channelConfigs.put(SSLNeedClientAuthProp, sslNeedClientAuth: java.lang.Boolean) + channelConfigs.put(SSLWantClientAuthProp, sslWantClientAuth: java.lang.Boolean) + channelConfigs + } } diff --git a/core/src/main/scala/kafka/server/KafkaServer.scala b/core/src/main/scala/kafka/server/KafkaServer.scala index b320ce9..898cbf6 100755 --- a/core/src/main/scala/kafka/server/KafkaServer.scala +++ b/core/src/main/scala/kafka/server/KafkaServer.scala @@ -134,7 +134,7 @@ class KafkaServer(val config: KafkaConfig, time: Time = SystemTime) extends Logg this.logIdent = "[Kafka Server " + config.brokerId + "], " val metrics = new Metrics(metricConfig, reporters, socketServerTime) - + val channelConfigs = config.channelConfigs socketServer = new SocketServer(config.brokerId, config.listeners, @@ -146,6 +146,7 @@ class KafkaServer(val config: KafkaConfig, time: Time = SystemTime) extends Logg config.maxConnectionsPerIp, config.connectionsMaxIdleMs, config.maxConnectionsPerIpOverrides, + channelConfigs, socketServerTime, metrics) socketServer.startup() diff --git a/core/src/test/scala/unit/kafka/integration/UncleanLeaderElectionTest.scala b/core/src/test/scala/unit/kafka/integration/UncleanLeaderElectionTest.scala index e4bf2df..1d31dd7 100755 --- a/core/src/test/scala/unit/kafka/integration/UncleanLeaderElectionTest.scala +++ b/core/src/test/scala/unit/kafka/integration/UncleanLeaderElectionTest.scala @@ -110,9 +110,9 @@ class UncleanLeaderElectionTest extends JUnit3Suite with ZooKeeperTestHarness { } def 
testUncleanLeaderElectionDisabled { - // disable unclean leader election - configProps1.put("unclean.leader.election.enable", String.valueOf(false)) - configProps2.put("unclean.leader.election.enable", String.valueOf(false)) + // disable unclean leader election + configProps1.put("unclean.leader.election.enable", String.valueOf(false)) + configProps2.put("unclean.leader.election.enable", String.valueOf(false)) startBrokers(Seq(configProps1, configProps2)) // create topic with 1 partition, 2 replicas, one on each broker diff --git a/core/src/test/scala/unit/kafka/network/SocketServerTest.scala b/core/src/test/scala/unit/kafka/network/SocketServerTest.scala deleted file mode 100644 index 7dc2fad..0000000 --- a/core/src/test/scala/unit/kafka/network/SocketServerTest.scala +++ /dev/null @@ -1,202 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package kafka.network; - -import java.net._ -import java.io._ -import kafka.cluster.EndPoint -import org.apache.kafka.common.metrics.Metrics -import org.apache.kafka.common.network.NetworkSend -import org.apache.kafka.common.protocol.SecurityProtocol -import org.apache.kafka.common.utils.SystemTime -import org.junit._ -import org.scalatest.junit.JUnitSuite -import java.util.Random -import junit.framework.Assert._ -import kafka.producer.SyncProducerConfig -import kafka.api.ProducerRequest -import java.nio.ByteBuffer -import kafka.common.TopicAndPartition -import kafka.message.ByteBufferMessageSet -import java.nio.channels.SelectionKey -import kafka.utils.TestUtils -import scala.collection.Map - -class SocketServerTest extends JUnitSuite { - - val server: SocketServer = new SocketServer(0, - Map(SecurityProtocol.PLAINTEXT -> EndPoint(null, 0, SecurityProtocol.PLAINTEXT), - SecurityProtocol.TRACE -> EndPoint(null, 0, SecurityProtocol.TRACE)), - numProcessorThreads = 1, - maxQueuedRequests = 50, - sendBufferSize = 300000, - recvBufferSize = 300000, - maxRequestSize = 50, - maxConnectionsPerIp = 5, - connectionsMaxIdleMs = 60*1000, - maxConnectionsPerIpOverrides = Map.empty[String,Int], - new SystemTime(), - new Metrics()) - server.startup() - - def sendRequest(socket: Socket, id: Short, request: Array[Byte]) { - val outgoing = new DataOutputStream(socket.getOutputStream) - outgoing.writeInt(request.length + 2) - outgoing.writeShort(id) - outgoing.write(request) - outgoing.flush() - } - - def receiveResponse(socket: Socket): Array[Byte] = { - val incoming = new DataInputStream(socket.getInputStream) - val len = incoming.readInt() - val response = new Array[Byte](len) - incoming.readFully(response) - response - } - - /* A simple request handler that just echos back the response */ - def processRequest(channel: RequestChannel) { - val request = channel.receiveRequest - val byteBuffer = 
ByteBuffer.allocate(request.requestObj.sizeInBytes) - request.requestObj.writeTo(byteBuffer) - byteBuffer.rewind() - val send = new NetworkSend(request.connectionId, byteBuffer) - channel.sendResponse(new RequestChannel.Response(request.processor, request, send)) - } - - def connect(s:SocketServer = server, protocol: SecurityProtocol = SecurityProtocol.PLAINTEXT) = { - new Socket("localhost", server.boundPort(protocol)) - } - - - @After - def cleanup() { - server.shutdown() - } - @Test - def simpleRequest() { - val plainSocket = connect(protocol = SecurityProtocol.PLAINTEXT) - val traceSocket = connect(protocol = SecurityProtocol.TRACE) - val correlationId = -1 - val clientId = SyncProducerConfig.DefaultClientId - val ackTimeoutMs = SyncProducerConfig.DefaultAckTimeoutMs - val ack = SyncProducerConfig.DefaultRequiredAcks - val emptyRequest = - new ProducerRequest(correlationId, clientId, ack, ackTimeoutMs, collection.mutable.Map[TopicAndPartition, ByteBufferMessageSet]()) - - val byteBuffer = ByteBuffer.allocate(emptyRequest.sizeInBytes) - emptyRequest.writeTo(byteBuffer) - byteBuffer.rewind() - val serializedBytes = new Array[Byte](byteBuffer.remaining) - byteBuffer.get(serializedBytes) - - // Test PLAINTEXT socket - sendRequest(plainSocket, 0, serializedBytes) - processRequest(server.requestChannel) - assertEquals(serializedBytes.toSeq, receiveResponse(plainSocket).toSeq) - - // Test TRACE socket - sendRequest(traceSocket, 0, serializedBytes) - processRequest(server.requestChannel) - assertEquals(serializedBytes.toSeq, receiveResponse(traceSocket).toSeq) - } - - @Test - def tooBigRequestIsRejected() { - val tooManyBytes = new Array[Byte](server.maxRequestSize + 1) - new Random().nextBytes(tooManyBytes) - val socket = connect() - sendRequest(socket, 0, tooManyBytes) - try { - receiveResponse(socket) - } catch { - case e: IOException => // thats fine - } - } - - @Test - def testSocketsCloseOnShutdown() { - // open a connection - val plainSocket = connect(protocol = SecurityProtocol.PLAINTEXT) - val traceSocket = connect(protocol = SecurityProtocol.TRACE) - val bytes = new Array[Byte](40) - // send a request first to make sure the connection has been picked up by the socket server - sendRequest(plainSocket, 0, bytes) - sendRequest(traceSocket, 0, bytes) - processRequest(server.requestChannel) - - // make sure the sockets are open - server.acceptors.values.map(acceptor => assertFalse(acceptor.serverChannel.socket.isClosed)) - // then shutdown the server - server.shutdown() - - val largeChunkOfBytes = new Array[Byte](1000000) - // doing a subsequent send should throw an exception as the connection should be closed. 
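The helper methods at the top of this removed test encode the socket server's simple request framing: a 4-byte length, a 2-byte request id, then the serialized request; responses come back as a 4-byte length followed by the payload. A condensed sketch of that framing, assuming an already connected java.net.Socket, is:

    import java.io.{DataInputStream, DataOutputStream}
    import java.net.Socket

    object FramingSketch {
      // Sketch of the test helpers above; the length prefix covers the 2-byte id plus the payload.
      def send(socket: Socket, id: Short, payload: Array[Byte]): Unit = {
        val out = new DataOutputStream(socket.getOutputStream)
        out.writeInt(payload.length + 2)
        out.writeShort(id.toInt)
        out.write(payload)
        out.flush()
      }

      def receive(socket: Socket): Array[Byte] = {
        val in = new DataInputStream(socket.getInputStream)
        val response = new Array[Byte](in.readInt())
        in.readFully(response)
        response
      }
    }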
- // send a large chunk of bytes to trigger a socket flush - try { - sendRequest(plainSocket, 0, largeChunkOfBytes) - fail("expected exception when writing to closed plain socket") - } catch { - case e: IOException => // expected - } - - try { - sendRequest(traceSocket, 0, largeChunkOfBytes) - fail("expected exception when writing to closed trace socket") - } catch { - case e: IOException => // expected - } - } - - @Test - def testMaxConnectionsPerIp() { - // make the maximum allowable number of connections and then leak them - val conns = (0 until server.maxConnectionsPerIp).map(i => connect()) - // now try one more (should fail) - val conn = connect() - conn.setSoTimeout(3000) - assertEquals(-1, conn.getInputStream().read()) - } - - @Test - def testMaxConnectionsPerIPOverrides(): Unit = { - val overrideNum = 6 - val overrides: Map[String, Int] = Map("localhost" -> overrideNum) - val overrideServer: SocketServer = new SocketServer(0, - Map(SecurityProtocol.PLAINTEXT -> EndPoint(null, 0, SecurityProtocol.PLAINTEXT)), - numProcessorThreads = 1, - maxQueuedRequests = 50, - sendBufferSize = 300000, - recvBufferSize = 300000, - maxRequestSize = 50, - maxConnectionsPerIp = 5, - connectionsMaxIdleMs = 60*1000, - maxConnectionsPerIpOverrides = overrides, - new SystemTime(), - new Metrics()) - overrideServer.startup() - // make the maximum allowable number of connections and then leak them - val conns = ((0 until overrideNum).map(i => connect(overrideServer))) - // now try one more (should fail) - val conn = connect(overrideServer) - conn.setSoTimeout(3000) - assertEquals(-1, conn.getInputStream.read()) - overrideServer.shutdown() - } -} diff --git a/core/src/test/scala/unit/kafka/server/KafkaConfigConfigDefTest.scala b/core/src/test/scala/unit/kafka/server/KafkaConfigConfigDefTest.scala index ace6321..fe5ac8b 100644 --- a/core/src/test/scala/unit/kafka/server/KafkaConfigConfigDefTest.scala +++ b/core/src/test/scala/unit/kafka/server/KafkaConfigConfigDefTest.scala @@ -249,11 +249,29 @@ class KafkaConfigConfigDefTest extends JUnit3Suite { case KafkaConfig.MetricSampleWindowMsProp => expected.setProperty(name, "1000") case KafkaConfig.MetricReporterClassesProp => expected.setProperty(name, "") + //SSL Configs + case KafkaConfig.PrincipalBuilderClassProp => expected.setProperty(name, "") + case KafkaConfig.SSLProtocolProp => expected.setProperty(name, "SSL") + case KafkaConfig.SSLProviderProp => expected.setProperty(name, "") + case KafkaConfig.SSLEnabledProtocolsProp => expected.setProperty(name, "SSLv2,SSLv3") + case KafkaConfig.SSLKeystoreTypeProp => expected.setProperty(name, "JKS") + case KafkaConfig.SSLKeystoreLocationProp => expected.setProperty(name, "/tmp/keystore.jks") + case KafkaConfig.SSLKeystorePasswordProp => expected.setProperty(name, "password") + case KafkaConfig.SSLKeyPasswordProp => expected.setProperty(name, "kpassword") + case KafkaConfig.SSLTruststoreTypeProp => expected.setProperty(name, "jks") + case KafkaConfig.SSLTruststorePasswordProp => expected.setProperty(name, "tpassword") + case KafkaConfig.SSLTruststoreLocationProp => expected.setProperty(name, "/tmp/truststore.jks") + case KafkaConfig.SSLKeyManagerAlgorithmProp => expected.setProperty(name, "ssl") + case KafkaConfig.SSLTrustManagerAlgorithmProp => expected.setProperty(name, "tls") + case KafkaConfig.SSLNeedClientAuthProp => expected.setProperty(name, randFrom("true", "false")) + case KafkaConfig.SSLWantClientAuthProp => expected.setProperty(name, randFrom("true", "false")) case nonNegativeIntProperty => 
expected.setProperty(name, nextInt(Int.MaxValue).toString) } }) val actual = KafkaConfig.fromProps(expected).toProps + println(actual) + println(expected) Assert.assertEquals(expected, actual) } @@ -356,6 +374,23 @@ class KafkaConfigConfigDefTest extends JUnit3Suite { case KafkaConfig.MetricSampleWindowMsProp => assertPropertyInvalid(getBaseProperties, name, "not_a_number", "-1", "0") case KafkaConfig.MetricReporterClassesProp => // ignore string + //SSL Configs + case KafkaConfig.PrincipalBuilderClassProp => + case KafkaConfig.SSLProtocolProp => // ignore string + case KafkaConfig.SSLProviderProp => // ignore string + case KafkaConfig.SSLEnabledProtocolsProp => + case KafkaConfig.SSLKeystoreTypeProp => // ignore string + case KafkaConfig.SSLKeystoreLocationProp => // ignore string + case KafkaConfig.SSLKeystorePasswordProp => // ignore string + case KafkaConfig.SSLKeyPasswordProp => // ignore string + case KafkaConfig.SSLTruststoreTypeProp => // ignore string + case KafkaConfig.SSLTruststorePasswordProp => // ignore string + case KafkaConfig.SSLTruststoreLocationProp => // ignore string + case KafkaConfig.SSLKeyManagerAlgorithmProp => + case KafkaConfig.SSLTrustManagerAlgorithmProp => + case KafkaConfig.SSLNeedClientAuthProp => assertPropertyInvalid(getBaseProperties(), name, "not_a_boolean", "0") + case KafkaConfig.SSLWantClientAuthProp => assertPropertyInvalid(getBaseProperties(), name, "not_a_boolean", "0") + case nonNegativeIntProperty => assertPropertyInvalid(getBaseProperties(), name, "not_a_number", "-1") } }) -- 2.4.6 From 9328ffa464711a835be8935cb09922230e0e1a58 Mon Sep 17 00:00:00 2001 From: Sriharsha Chintalapani Date: Sat, 20 Jun 2015 10:47:01 -0700 Subject: [PATCH 15/30] KAFKA-1684. SSL for socketServer. --- build.gradle | 2 + .../org/apache/kafka/clients/NetworkClient.java | 30 +-- .../apache/kafka/common/network/Authenticator.java | 2 +- .../kafka/common/network/ByteBufferSend.java | 2 +- .../org/apache/kafka/common/network/Channel.java | 21 +- .../kafka/common/network/ChannelBuilder.java | 2 +- .../kafka/common/network/DefaultAuthenticator.java | 2 +- .../kafka/common/network/NetworkReceive.java | 3 +- .../common/network/PlainTextChannelBuilder.java | 4 +- .../common/network/PlainTextTransportLayer.java | 15 +- .../kafka/common/network/SSLChannelBuilder.java | 4 +- .../kafka/common/network/SSLTransportLayer.java | 19 +- .../org/apache/kafka/common/network/Selector.java | 15 +- .../kafka/common/network/TransportLayer.java | 7 +- .../org/apache/kafka/clients/ClientUtilsTest.java | 2 +- .../kafka/common/network/SSLFactoryTest.java | 9 +- .../kafka/common/network/SSLSelectorTest.java | 9 +- .../java/org/apache/kafka/test/TestSSLUtils.java | 56 +++--- .../main/scala/kafka/network/SocketServer.scala | 115 ++++++----- core/src/main/scala/kafka/server/KafkaConfig.scala | 6 +- core/src/main/scala/kafka/server/KafkaServer.scala | 124 +++++------- .../integration/kafka/api/ProducerSendTest.scala | 4 +- .../kafka/api/SSLProducerSendTest.scala | 108 +++++++++++ .../scala/unit/kafka/admin/AddPartitionsTest.scala | 2 +- .../unit/kafka/network/SocketServerTest.scala | 211 +++++++++++++++++++++ .../test/scala/unit/kafka/utils/TestUtils.scala | 68 ++++++- 26 files changed, 604 insertions(+), 238 deletions(-) create mode 100644 core/src/test/scala/integration/kafka/api/SSLProducerSendTest.scala create mode 100644 core/src/test/scala/unit/kafka/network/SocketServerTest.scala diff --git a/build.gradle b/build.gradle index 673f2dc..6355c56 100644 --- a/build.gradle +++ b/build.gradle @@ -216,7 
+216,9 @@ project(':core') { testCompile 'junit:junit:4.6' testCompile 'org.easymock:easymock:3.0' testCompile 'org.objenesis:objenesis:1.2' + testCompile 'org.bouncycastle:bcpkix-jdk15on:1.52' testCompile project(':clients') + testCompile project(':clients').sourceSets.test.output if (scalaVersion.startsWith('2.10')) { testCompile 'org.scalatest:scalatest_2.10:1.9.1' } else if (scalaVersion.startsWith('2.11')) { diff --git a/clients/src/main/java/org/apache/kafka/clients/NetworkClient.java b/clients/src/main/java/org/apache/kafka/clients/NetworkClient.java index 48fe796..d90daa4 100644 --- a/clients/src/main/java/org/apache/kafka/clients/NetworkClient.java +++ b/clients/src/main/java/org/apache/kafka/clients/NetworkClient.java @@ -3,9 +3,9 @@ * file distributed with this work for additional information regarding copyright ownership. The ASF licenses this file * to You under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the * License. You may obtain a copy of the License at - * + * * http://www.apache.org/licenses/LICENSE-2.0 - * + * * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the * specific language governing permissions and limitations under the License. @@ -101,7 +101,7 @@ public class NetworkClient implements KafkaClient { /** * Begin connecting to the given node, return true if we are already connected and ready to send to that node. - * + * * @param node The node to check * @param now The current timestamp * @return True if we are ready to send to the given node @@ -122,7 +122,7 @@ public class NetworkClient implements KafkaClient { * Returns the number of milliseconds to wait, based on the connection state, before attempting to send data. When * disconnected, this respects the reconnect backoff time. When connecting or connected, this handles slow/stalled * connections. - * + * * @param node The node to check * @param now The current timestamp * @return The number of milliseconds to wait. @@ -147,7 +147,7 @@ public class NetworkClient implements KafkaClient { /** * Check if the node with the given id is ready to send more requests. - * + * * @param node The node * @param now The current time in ms * @return true if the node is ready @@ -166,7 +166,7 @@ public class NetworkClient implements KafkaClient { /** * Are we connected and ready and able to send more requests to the given connection? - * + * * @param node The node */ private boolean isSendable(String node) { @@ -175,7 +175,7 @@ public class NetworkClient implements KafkaClient { /** * Return the state of the connection to the given node - * + * * @param node The node to check * @return The connection state */ @@ -185,7 +185,7 @@ public class NetworkClient implements KafkaClient { /** * Queue up the given request for sending. Requests can only be sent out to ready nodes. - * + * * @param request The request */ @Override @@ -200,7 +200,7 @@ public class NetworkClient implements KafkaClient { /** * Do actual reads and writes to sockets. 
- * + * * @param timeout The maximum amount of time to wait (in ms) for responses if there are none immediately * @param now The current time in milliseconds * @return The list of responses received @@ -246,7 +246,7 @@ public class NetworkClient implements KafkaClient { /** * Await all the outstanding responses for requests on the given connection - * + * * @param node The node to block on * @param now The current time in ms * @return All the collected responses @@ -294,7 +294,7 @@ public class NetworkClient implements KafkaClient { /** * Generate a request header for the given API key - * + * * @param key The api key * @return A request header with the appropriate client id and correlation id */ @@ -324,7 +324,7 @@ public class NetworkClient implements KafkaClient { * prefer a node with an existing connection, but will potentially choose a node for which we don't yet have a * connection if all existing connections are in use. This method will never choose a node for which there is no * existing connection and from which we have disconnected within the reconnect backoff period. - * + * * @return The node with the fewest in-flight requests. */ public Node leastLoadedNode(long now) { @@ -349,7 +349,7 @@ public class NetworkClient implements KafkaClient { /** * Handle any completed request send. In particular if no response is expected consider the request complete. - * + * * @param responses The list of responses to update * @param now The current time */ @@ -366,7 +366,7 @@ public class NetworkClient implements KafkaClient { /** * Handle any completed receives and update the response list with the responses received. - * + * * @param responses The list of responses to update * @param now The current time */ @@ -407,7 +407,7 @@ public class NetworkClient implements KafkaClient { /** * Handle any disconnected connections - * + * * @param responses The list of responses that completed with the disconnection * @param now The current time */ diff --git a/clients/src/main/java/org/apache/kafka/common/network/Authenticator.java b/clients/src/main/java/org/apache/kafka/common/network/Authenticator.java index 8ab004f..b3f574b 100644 --- a/clients/src/main/java/org/apache/kafka/common/network/Authenticator.java +++ b/clients/src/main/java/org/apache/kafka/common/network/Authenticator.java @@ -49,6 +49,6 @@ public interface Authenticator { /** * returns true if authentication is complete otherwise returns false; */ - boolean isComplete(); + boolean complete(); } diff --git a/clients/src/main/java/org/apache/kafka/common/network/ByteBufferSend.java b/clients/src/main/java/org/apache/kafka/common/network/ByteBufferSend.java index 85babbd..86fc6f7 100644 --- a/clients/src/main/java/org/apache/kafka/common/network/ByteBufferSend.java +++ b/clients/src/main/java/org/apache/kafka/common/network/ByteBufferSend.java @@ -26,7 +26,7 @@ public class ByteBufferSend implements Send { protected final ByteBuffer[] buffers; private int remaining; private int size; - private boolean pending; + private boolean pending = false; public ByteBufferSend(String destination, ByteBuffer... 
buffers) { super(); diff --git a/clients/src/main/java/org/apache/kafka/common/network/Channel.java b/clients/src/main/java/org/apache/kafka/common/network/Channel.java index 9f76319..0085402 100644 --- a/clients/src/main/java/org/apache/kafka/common/network/Channel.java +++ b/clients/src/main/java/org/apache/kafka/common/network/Channel.java @@ -32,15 +32,17 @@ import org.slf4j.LoggerFactory; public class Channel { private static final Logger log = LoggerFactory.getLogger(Channel.class); private final String id; - private TransportLayer transportLayer; + public TransportLayer transportLayer; private Authenticator authenticator; private NetworkReceive receive; private Send send; + private int maxReceiveSize; - public Channel(String id, TransportLayer transportLayer, Authenticator authenticator) throws IOException { + public Channel(String id, TransportLayer transportLayer, Authenticator authenticator, int maxReceiveSize) throws IOException { this.id = id; this.transportLayer = transportLayer; this.authenticator = authenticator; + this.maxReceiveSize = maxReceiveSize; } public void close() throws IOException { @@ -61,11 +63,11 @@ public class Channel { * Does handshake of transportLayer and Authentication using configured authenticator */ public void prepare() throws IOException { - if (transportLayer.isReady() && authenticator.isComplete()) + if (transportLayer.ready() && authenticator.complete()) return; - if (!transportLayer.isReady()) + if (!transportLayer.ready()) transportLayer.handshake(); - if (transportLayer.isReady() && !authenticator.isComplete()) + if (transportLayer.ready() && !authenticator.complete()) authenticator.authenticate(); } @@ -90,8 +92,8 @@ public class Channel { transportLayer.addInterestOps(SelectionKey.OP_READ); } - public boolean isReady() { - return transportLayer.isReady() && authenticator.isComplete(); + public boolean ready() { + return transportLayer.ready() && authenticator.complete(); } public String socketDescription() { @@ -115,9 +117,10 @@ public class Channel { NetworkReceive result = null; if (receive == null) { - receive = new NetworkReceive(id); + receive = new NetworkReceive(maxReceiveSize, id); } - receive(receive); + + long x = receive(receive); if (receive.complete()) { receive.payload().rewind(); result = receive; diff --git a/clients/src/main/java/org/apache/kafka/common/network/ChannelBuilder.java b/clients/src/main/java/org/apache/kafka/common/network/ChannelBuilder.java index a9a88db..e0ff4e8 100644 --- a/clients/src/main/java/org/apache/kafka/common/network/ChannelBuilder.java +++ b/clients/src/main/java/org/apache/kafka/common/network/ChannelBuilder.java @@ -33,7 +33,7 @@ public interface ChannelBuilder { * @param id channel id * @param key SelectionKey */ - public Channel buildChannel(String id, SelectionKey key) throws KafkaException; + public Channel buildChannel(String id, SelectionKey key, int maxReceiveSize) throws KafkaException; /** diff --git a/clients/src/main/java/org/apache/kafka/common/network/DefaultAuthenticator.java b/clients/src/main/java/org/apache/kafka/common/network/DefaultAuthenticator.java index d5e24ad..371c97d 100644 --- a/clients/src/main/java/org/apache/kafka/common/network/DefaultAuthenticator.java +++ b/clients/src/main/java/org/apache/kafka/common/network/DefaultAuthenticator.java @@ -56,7 +56,7 @@ public class DefaultAuthenticator implements Authenticator { * DefaultAuthenticator doesn't implement any additional authentication. 
* @returns true */ - public boolean isComplete() { + public boolean complete() { return true; } diff --git a/clients/src/main/java/org/apache/kafka/common/network/NetworkReceive.java b/clients/src/main/java/org/apache/kafka/common/network/NetworkReceive.java index 4fbc53c..6444453 100644 --- a/clients/src/main/java/org/apache/kafka/common/network/NetworkReceive.java +++ b/clients/src/main/java/org/apache/kafka/common/network/NetworkReceive.java @@ -87,8 +87,9 @@ public class NetworkReceive implements Receive { int receiveSize = size.getInt(); if (receiveSize < 0) throw new InvalidReceiveException("Invalid receive (size = " + receiveSize + ")"); - if (maxSize != UNLIMITED && receiveSize > maxSize) + if (maxSize != UNLIMITED && receiveSize > maxSize) { throw new InvalidReceiveException("Invalid receive (size = " + receiveSize + " larger than " + maxSize + ")"); + } this.buffer = ByteBuffer.allocate(receiveSize); } } diff --git a/clients/src/main/java/org/apache/kafka/common/network/PlainTextChannelBuilder.java b/clients/src/main/java/org/apache/kafka/common/network/PlainTextChannelBuilder.java index eb2cbf3..4b9837a 100644 --- a/clients/src/main/java/org/apache/kafka/common/network/PlainTextChannelBuilder.java +++ b/clients/src/main/java/org/apache/kafka/common/network/PlainTextChannelBuilder.java @@ -37,12 +37,12 @@ public class PlainTextChannelBuilder implements ChannelBuilder { } } - public Channel buildChannel(String id, SelectionKey key) throws KafkaException { + public Channel buildChannel(String id, SelectionKey key, int maxReceiveSize) throws KafkaException { Channel channel = null; try { PlainTextTransportLayer transportLayer = new PlainTextTransportLayer(key); Authenticator authenticator = new DefaultAuthenticator(transportLayer, this.principalBuilder); - channel = new Channel(id, transportLayer, authenticator); + channel = new Channel(id, transportLayer, authenticator, maxReceiveSize); } catch (Exception e) { log.warn("Failed to create channel due to ", e); throw new KafkaException(e); diff --git a/clients/src/main/java/org/apache/kafka/common/network/PlainTextTransportLayer.java b/clients/src/main/java/org/apache/kafka/common/network/PlainTextTransportLayer.java index b7529a7..2390f03 100644 --- a/clients/src/main/java/org/apache/kafka/common/network/PlainTextTransportLayer.java +++ b/clients/src/main/java/org/apache/kafka/common/network/PlainTextTransportLayer.java @@ -44,7 +44,7 @@ public class PlainTextTransportLayer implements TransportLayer { this.socketChannel = (SocketChannel) key.channel(); } - public boolean isReady() { + public boolean ready() { return true; } @@ -79,17 +79,6 @@ public class PlainTextTransportLayer implements TransportLayer { socketChannel.close(); } - - - /** - * There won't be any pending bytes to written socketChannel once write method is called. - * This will always return false. - */ - public boolean pending() { - return false; - } - - /** * Performs SSL handshake hence is a no-op for the non-secure * implementation @@ -97,7 +86,6 @@ public class PlainTextTransportLayer implements TransportLayer { */ public void handshake() throws IOException {} - /** * Reads a sequence of bytes from this channel into the given buffer. 
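The Channel and transport-layer changes above make connection readiness explicit: a channel is usable only after the transport layer finishes its handshake (a no-op for plaintext, the TLS handshake for SSL) and the authenticator reports complete(). A minimal sketch (not code from this patch) of the contract a caller such as the Selector poll loop relies on:

    import org.apache.kafka.common.network.Channel

    object ChannelReadinessSketch {
      // ready() is transportLayer.ready() && authenticator.complete(); prepare() advances the
      // handshake and then authentication, and may need to run across several poll cycles.
      def prepareIfNeeded(channel: Channel): Unit = {
        if (!channel.ready())
          channel.prepare()
      }
    }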
* @@ -107,6 +95,7 @@ public class PlainTextTransportLayer implements TransportLayer { */ public int read(ByteBuffer dst) throws IOException { + System.out.println("in read " + dst.remaining()); return socketChannel.read(dst); } diff --git a/clients/src/main/java/org/apache/kafka/common/network/SSLChannelBuilder.java b/clients/src/main/java/org/apache/kafka/common/network/SSLChannelBuilder.java index 8ba9f00..0a74a2b 100644 --- a/clients/src/main/java/org/apache/kafka/common/network/SSLChannelBuilder.java +++ b/clients/src/main/java/org/apache/kafka/common/network/SSLChannelBuilder.java @@ -44,7 +44,7 @@ public class SSLChannelBuilder implements ChannelBuilder { } } - public Channel buildChannel(String id, SelectionKey key) throws KafkaException { + public Channel buildChannel(String id, SelectionKey key, int maxReceiveSize) throws KafkaException { Channel channel = null; try { SocketChannel socketChannel = (SocketChannel) key.channel(); @@ -52,7 +52,7 @@ public class SSLChannelBuilder implements ChannelBuilder { sslFactory.createSSLEngine(socketChannel.socket().getInetAddress().getHostName(), socketChannel.socket().getPort())); Authenticator authenticator = new DefaultAuthenticator(transportLayer, this.principalBuilder); - channel = new Channel(id, transportLayer, authenticator); + channel = new Channel(id, transportLayer, authenticator, maxReceiveSize); } catch (Exception e) { log.info("Failed to create channel due to ", e); throw new KafkaException(e); diff --git a/clients/src/main/java/org/apache/kafka/common/network/SSLTransportLayer.java b/clients/src/main/java/org/apache/kafka/common/network/SSLTransportLayer.java index b9f57aa..8e88b7a 100644 --- a/clients/src/main/java/org/apache/kafka/common/network/SSLTransportLayer.java +++ b/clients/src/main/java/org/apache/kafka/common/network/SSLTransportLayer.java @@ -66,6 +66,7 @@ public class SSLTransportLayer implements TransportLayer { this.netWriteBuffer = ByteBuffer.allocateDirect(packetBufferSize()); this.appReadBuffer = ByteBuffer.allocateDirect(applicationBufferSize()); this.socketSendBufferSize = this.socketChannel.socket().getSendBufferSize(); + startHandshake(); } /** @@ -80,6 +81,7 @@ public class SSLTransportLayer implements TransportLayer { handshakeComplete = false; closed = false; closing = false; + addInterestOps(SelectionKey.OP_READ); //initiate handshake sslEngine.beginHandshake(); handshakeStatus = sslEngine.getHandshakeStatus(); @@ -88,7 +90,7 @@ public class SSLTransportLayer implements TransportLayer { } - public boolean isReady() { + public boolean ready() { return handshakeComplete; } @@ -98,9 +100,7 @@ public class SSLTransportLayer implements TransportLayer { public void finishConnect() throws IOException { socketChannel.finishConnect(); removeInterestOps(SelectionKey.OP_CONNECT); - addInterestOps(SelectionKey.OP_READ); key.interestOps(key.interestOps() & ~SelectionKey.OP_CONNECT | SelectionKey.OP_READ); - startHandshake(); } /** @@ -177,7 +177,6 @@ public class SSLTransportLayer implements TransportLayer { boolean write = key.isWritable(); handshakeComplete = false; handshakeStatus = sslEngine.getHandshakeStatus(); - if (!flush(netWriteBuffer)) { key.interestOps(SelectionKey.OP_WRITE); return; @@ -368,13 +367,11 @@ public class SSLTransportLayer implements TransportLayer { } if (dst.remaining() > 0) { - boolean canRead = true; netReadBuffer = Utils.ensureCapacity(netReadBuffer, packetBufferSize()); - if (canRead && netReadBuffer.remaining() > 0) { + if (netReadBuffer.remaining() > 0) { int netread = 
socketChannel.read(netReadBuffer); - canRead = netread > 0; + if (netread == 0) return netread; } - do { netReadBuffer.flip(); SSLEngineResult unwrapResult = sslEngine.unwrap(netReadBuffer, appReadBuffer); @@ -401,13 +398,16 @@ public class SSLTransportLayer implements TransportLayer { } else if (unwrapResult.getStatus() == Status.BUFFER_UNDERFLOW) { int currentPacketBufferSize = packetBufferSize(); netReadBuffer = Utils.ensureCapacity(netReadBuffer, currentPacketBufferSize); + if (netReadBuffer.position() >= currentPacketBufferSize) { + throw new IllegalStateException("Buffer underflow when available data (" + netReadBuffer.position() + + ") > packet buffer size (" + currentPacketBufferSize + ")"); + } break; } else if (unwrapResult.getStatus() == Status.CLOSED) { throw new EOFException(); } } while (netReadBuffer.position() != 0); } - return read; } @@ -516,7 +516,6 @@ public class SSLTransportLayer implements TransportLayer { totalWritten += written; } } - if (!srcs[i].hasRemaining()) { i++; } else { diff --git a/clients/src/main/java/org/apache/kafka/common/network/Selector.java b/clients/src/main/java/org/apache/kafka/common/network/Selector.java index 4187276..64237e5 100644 --- a/clients/src/main/java/org/apache/kafka/common/network/Selector.java +++ b/clients/src/main/java/org/apache/kafka/common/network/Selector.java @@ -154,7 +154,7 @@ public class Selector implements Selectable { throw e; } SelectionKey key = socketChannel.register(nioSelector, SelectionKey.OP_CONNECT); - Channel channel = channelBuilder.buildChannel(id, key); + Channel channel = channelBuilder.buildChannel(id, key, maxReceiveSize); key.attach(channel); this.channels.put(id, channel); } @@ -166,7 +166,7 @@ public class Selector implements Selectable { */ public void register(String id, SocketChannel socketChannel) throws ClosedChannelException { SelectionKey key = socketChannel.register(nioSelector, SelectionKey.OP_READ); - Channel channel = channelBuilder.buildChannel(id, key); + Channel channel = channelBuilder.buildChannel(id, key, maxReceiveSize); key.attach(channel); this.channels.put(id, channel); } @@ -269,12 +269,12 @@ public class Selector implements Selectable { } /* if channel is not ready finish prepare */ - if (!channel.isReady()) { + if (!channel.ready()) { channel.prepare(); } - /* if channel is ready read from any connections that have readable data */ - if (channel.isReady() && key.isReadable()) { + /* if channel is ready read from any connections that have readable data */ + if (channel.ready() && key.isReadable()) { NetworkReceive networkReceive; try { if ((networkReceive = channel.read()) != null) { @@ -289,7 +289,7 @@ public class Selector implements Selectable { } /* if channel is ready write to any sockets that have space in their buffer and for which we have data */ - if (key.isWritable() && channel.isReady()) { + if (channel.ready() && key.isWritable()) { Send send = channel.write(); if (send != null) { this.completedSends.add(send); @@ -423,7 +423,8 @@ public class Selector implements Selectable { */ public void close(String id) { Channel channel = this.channels.get(id); - close(channel); + if (channel != null) + close(channel); } /** diff --git a/clients/src/main/java/org/apache/kafka/common/network/TransportLayer.java b/clients/src/main/java/org/apache/kafka/common/network/TransportLayer.java index 6a085c6..2fa4437 100644 --- a/clients/src/main/java/org/apache/kafka/common/network/TransportLayer.java +++ b/clients/src/main/java/org/apache/kafka/common/network/TransportLayer.java @@ 
-33,7 +33,7 @@ public interface TransportLayer extends ScatteringByteChannel, GatheringByteChan /** * Returns true if the channel has handshake and authenticaiton done. */ - boolean isReady(); + boolean ready(); /** * Finishes the process of connecting a socket channel. @@ -52,11 +52,6 @@ public interface TransportLayer extends ScatteringByteChannel, GatheringByteChan /** - * returns true if there are any pending bytes needs to be written to channel. - */ - boolean pending(); - - /** * Performs SSL handshake hence is a no-op for the non-secure * implementation * @throws IOException diff --git a/clients/src/test/java/org/apache/kafka/clients/ClientUtilsTest.java b/clients/src/test/java/org/apache/kafka/clients/ClientUtilsTest.java index 13ce519..d6a4019 100644 --- a/clients/src/test/java/org/apache/kafka/clients/ClientUtilsTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/ClientUtilsTest.java @@ -39,4 +39,4 @@ public class ClientUtilsTest { private void check(String... url) { ClientUtils.parseAndValidateAddresses(Arrays.asList(url)); } -} \ No newline at end of file +} diff --git a/clients/src/test/java/org/apache/kafka/common/network/SSLFactoryTest.java b/clients/src/test/java/org/apache/kafka/common/network/SSLFactoryTest.java index 9e3926c..a2cf302 100644 --- a/clients/src/test/java/org/apache/kafka/common/network/SSLFactoryTest.java +++ b/clients/src/test/java/org/apache/kafka/common/network/SSLFactoryTest.java @@ -14,6 +14,7 @@ package org.apache.kafka.common.network; import javax.net.ssl.*; +import java.io.File; import java.util.Map; import org.apache.kafka.test.TestSSLUtils; @@ -33,8 +34,8 @@ public class SSLFactoryTest { @Test public void testSSLFactoryConfiguration() throws Exception { - Map> sslConfigs = TestSSLUtils.createSSLConfigs(false, true); - Map serverSSLConfig = sslConfigs.get(SSLFactory.Mode.SERVER); + File trustStoreFile = File.createTempFile("truststore", ".jks"); + Map serverSSLConfig = TestSSLUtils.createSSLConfig(false, true, SSLFactory.Mode.SERVER, trustStoreFile, "server"); SSLFactory sslFactory = new SSLFactory(SSLFactory.Mode.SERVER); sslFactory.configure(serverSSLConfig); //host and port are hints @@ -47,8 +48,8 @@ public class SSLFactoryTest { @Test public void testClientMode() throws Exception { - Map> sslConfigs = TestSSLUtils.createSSLConfigs(false, true); - Map clientSSLConfig = sslConfigs.get(SSLFactory.Mode.CLIENT); + File trustStoreFile = File.createTempFile("truststore", ".jks"); + Map clientSSLConfig = TestSSLUtils.createSSLConfig(false, true, SSLFactory.Mode.CLIENT, trustStoreFile, "client"); SSLFactory sslFactory = new SSLFactory(SSLFactory.Mode.CLIENT); sslFactory.configure(clientSSLConfig); //host and port are hints diff --git a/clients/src/test/java/org/apache/kafka/common/network/SSLSelectorTest.java b/clients/src/test/java/org/apache/kafka/common/network/SSLSelectorTest.java index 1a8cc27..478afbb 100644 --- a/clients/src/test/java/org/apache/kafka/common/network/SSLSelectorTest.java +++ b/clients/src/test/java/org/apache/kafka/common/network/SSLSelectorTest.java @@ -19,6 +19,7 @@ import java.util.LinkedHashMap; import java.util.Map; import java.io.IOException; +import java.io.File; import java.net.InetSocketAddress; import java.nio.ByteBuffer; @@ -46,12 +47,13 @@ public class SSLSelectorTest { @Before public void setup() throws Exception { - Map> sslConfigs = TestSSLUtils.createSSLConfigs(false, true); - Map sslServerConfigs = sslConfigs.get(SSLFactory.Mode.SERVER); + File trustStoreFile = File.createTempFile("truststore", 
".jks"); + + Map sslServerConfigs = TestSSLUtils.createSSLConfig(false, true, SSLFactory.Mode.SERVER, trustStoreFile, "server"); sslServerConfigs.put(SSLConfigs.PRINCIPAL_BUILDER_CLASS_CONFIG, Class.forName(SSLConfigs.DEFAULT_PRINCIPAL_BUILDER_CLASS)); this.server = new EchoServer(sslServerConfigs); this.server.start(); - Map sslClientConfigs = sslConfigs.get(SSLFactory.Mode.CLIENT); + Map sslClientConfigs = TestSSLUtils.createSSLConfig(false, false, SSLFactory.Mode.SERVER, trustStoreFile, "client"); sslClientConfigs.put(SSLConfigs.PRINCIPAL_BUILDER_CLASS_CONFIG, Class.forName(SSLConfigs.DEFAULT_PRINCIPAL_BUILDER_CLASS)); this.channelBuilder = new SSLChannelBuilder(SSLFactory.Mode.CLIENT); @@ -209,7 +211,6 @@ public class SSLSelectorTest { // } // } - private String blockingRequest(String node, String s) throws IOException { selector.send(createSend(node, s)); while (true) { diff --git a/clients/src/test/java/org/apache/kafka/test/TestSSLUtils.java b/clients/src/test/java/org/apache/kafka/test/TestSSLUtils.java index 1930cc2..08cd598 100644 --- a/clients/src/test/java/org/apache/kafka/test/TestSSLUtils.java +++ b/clients/src/test/java/org/apache/kafka/test/TestSSLUtils.java @@ -23,7 +23,9 @@ import org.apache.kafka.clients.CommonClientConfigs; import java.io.File; import java.io.FileOutputStream; +import java.io.FileInputStream; import java.io.IOException; +import java.io.EOFException; import java.math.BigInteger; import javax.net.ssl.TrustManagerFactory; import java.security.*; @@ -153,7 +155,14 @@ public class TestSSLUtils { public static void createTrustStore( String filename, String password, Map certs) throws GeneralSecurityException, IOException { - KeyStore ks = createEmptyKeyStore(); + KeyStore ks = KeyStore.getInstance("JKS"); + try { + FileInputStream in = new FileInputStream(filename); + ks.load(in, password.toCharArray()); + in.close(); + } catch (EOFException e) { + ks = createEmptyKeyStore(); + } for (Map.Entry cert : certs.entrySet()) { ks.setCertificateEntry(cert.getKey(), cert.getValue()); } @@ -194,42 +203,41 @@ public class TestSSLUtils { return sslConfigs; } - public static Map> createSSLConfigs(boolean useClientCert, boolean trustStore) + public static Map createSSLConfig(boolean useClientCert, boolean trustStore, SSLFactory.Mode mode, File trustStoreFile, String certAlias) throws IOException, GeneralSecurityException { - Map> sslConfigs = new HashMap>(); Map certs = new HashMap(); - File trustStoreFile = File.createTempFile("truststore", ".jks"); - File clientKeyStoreFile = null; - File serverKeyStoreFile = File.createTempFile("serverKS", ".jks"); - String clientPassword = "ClientPassword"; - String serverPassword = "ServerPassword"; + File keyStoreFile; + String password; + + if (mode == SSLFactory.Mode.SERVER) + password = "ServerPassword"; + else + password = "ClientPassword"; + String trustStorePassword = "TrustStorePassword"; if (useClientCert) { - clientKeyStoreFile = File.createTempFile("clientKS", ".jks"); + keyStoreFile = File.createTempFile("clientKS", ".jks"); KeyPair cKP = generateKeyPair("RSA"); X509Certificate cCert = generateCertificate("CN=localhost, O=client", cKP, 30, "SHA1withRSA"); - createKeyStore(clientKeyStoreFile.getPath(), clientPassword, "client", cKP.getPrivate(), cCert); - certs.put("client", cCert); + createKeyStore(keyStoreFile.getPath(), password, "client", cKP.getPrivate(), cCert); + certs.put(certAlias, cCert); + } else { + keyStoreFile = File.createTempFile("serverKS", ".jks"); + KeyPair sKP = generateKeyPair("RSA"); + X509Certificate 
sCert = generateCertificate("CN=localhost, O=server", sKP, 30, + "SHA1withRSA"); + createKeyStore(keyStoreFile.getPath(), password, password, "server", sKP.getPrivate(), sCert); + certs.put(certAlias, sCert); } - KeyPair sKP = generateKeyPair("RSA"); - X509Certificate sCert = generateCertificate("CN=localhost, O=server", sKP, 30, - "SHA1withRSA"); - createKeyStore(serverKeyStoreFile.getPath(), serverPassword, serverPassword, "server", sKP.getPrivate(), sCert); - certs.put("server", sCert); - if (trustStore) { createTrustStore(trustStoreFile.getPath(), trustStorePassword, certs); } - Map clientSSLConfig = createSSLConfig(SSLFactory.Mode.CLIENT, clientKeyStoreFile, clientPassword, - clientPassword, trustStoreFile, trustStorePassword, useClientCert); - Map serverSSLConfig = createSSLConfig(SSLFactory.Mode.SERVER, serverKeyStoreFile, serverPassword, - serverPassword, trustStoreFile, trustStorePassword, useClientCert); - sslConfigs.put(SSLFactory.Mode.CLIENT, clientSSLConfig); - sslConfigs.put(SSLFactory.Mode.SERVER, serverSSLConfig); - return sslConfigs; + Map sslConfig = createSSLConfig(mode, keyStoreFile, password, + password, trustStoreFile, trustStorePassword, useClientCert); + return sslConfig; } } diff --git a/core/src/main/scala/kafka/network/SocketServer.scala b/core/src/main/scala/kafka/network/SocketServer.scala index 6cf1a8d..a09ed84 100644 --- a/core/src/main/scala/kafka/network/SocketServer.scala +++ b/core/src/main/scala/kafka/network/SocketServer.scala @@ -28,9 +28,10 @@ import com.yammer.metrics.core.Gauge import kafka.cluster.EndPoint import kafka.common.KafkaException import kafka.metrics.KafkaMetricsGroup +import kafka.server.KafkaConfig import kafka.utils._ import org.apache.kafka.common.MetricName -import org.apache.kafka.common.metrics.Metrics +import org.apache.kafka.common.metrics._ import org.apache.kafka.common.network.{InvalidReceiveException, ChannelBuilder, PlainTextChannelBuilder, SSLChannelBuilder, SSLFactory} import org.apache.kafka.common.protocol.SecurityProtocol @@ -45,25 +46,42 @@ import scala.collection._ * Acceptor has N Processor threads that each have their own selector and read requests from sockets * M Handler threads that handle requests and produce responses back to the processor threads for writing. 
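The SocketServer rewrite below replaces the long constructor parameter list with a single KafkaConfig, from which it derives the listeners, processor thread counts, buffer sizes, connection quotas, and the channelConfigs used to build plaintext or SSL channels. A rough sketch of constructing and starting it under the new signature (property names and values are placeholders; the minimal required property set is not spelled out in this patch):

    import java.util.Properties
    import kafka.network.SocketServer
    import kafka.server.KafkaConfig

    object SocketServerStartupSketch {
      def main(args: Array[String]): Unit = {
        val props = new Properties()
        props.put("broker.id", "0")                               // placeholder broker settings
        props.put("zookeeper.connect", "localhost:2181")
        props.put("listeners", "PLAINTEXT://localhost:9092")
        val server = new SocketServer(KafkaConfig.fromProps(props))
        server.startup()                                          // one Acceptor per endpoint, each with its Processors
        // ... hand server.requestChannel to the request handler threads ...
        server.shutdown()
      }
    }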
*/ -class SocketServer(val brokerId: Int, - val endpoints: Map[SecurityProtocol, EndPoint], - val numProcessorThreads: Int, - val maxQueuedRequests: Int, - val sendBufferSize: Int, - val recvBufferSize: Int, - val maxRequestSize: Int = Int.MaxValue, - val maxConnectionsPerIp: Int = Int.MaxValue, - val connectionsMaxIdleMs: Long, - val maxConnectionsPerIpOverrides: Map[String, Int], - val channelConfigs: java.util.Map[String, Object], - val time: Time, - val metrics: Metrics) extends Logging with KafkaMetricsGroup { - this.logIdent = "[Socket Server on Broker " + brokerId + "], " - val requestChannel = new RequestChannel(numProcessorThreads, maxQueuedRequests) +class SocketServer(val config: KafkaConfig) extends Logging with KafkaMetricsGroup { + + private val jmxPrefix: String = "kafka.server" + private val reporters: java.util.List[MetricsReporter] = config.metricReporterClasses + reporters.add(new JmxReporter(jmxPrefix)) + + private val metricConfig: MetricConfig = new MetricConfig() + .samples(config.metricNumSamples) + .timeWindow(config.metricSampleWindowMs, TimeUnit.MILLISECONDS) + + val channelConfigs = config.channelConfigs + + // This exists so SocketServer (which uses Client libraries) can use the client Time objects without having to convert all of Kafka to use them + // Once we get rid of kafka.utils.time, we can get rid of this too + private val time: org.apache.kafka.common.utils.Time = new org.apache.kafka.common.utils.SystemTime() + + val endpoints = config.listeners + val numProcessorThreads = config.numNetworkThreads + val maxQueuedRequests = config.queuedMaxRequests + val sendBufferSize = config.socketSendBufferBytes + val recvBufferSize = config.socketReceiveBufferBytes + val maxRequestSize = config.socketRequestMaxBytes + val maxConnectionsPerIp = config.maxConnectionsPerIp + val connectionsMaxIdleMs = config.connectionsMaxIdleMs + val maxConnectionsPerIpOverrides = config.maxConnectionsPerIpOverrides + val totalProcessorThreads = numProcessorThreads * endpoints.size + + this.logIdent = "[Socket Server on Broker " + config.brokerId + "], " + + val requestChannel = new RequestChannel(totalProcessorThreads, maxQueuedRequests) + val processors = new Array[Processor](totalProcessorThreads) + private[network] var acceptors = mutable.Map[EndPoint,Acceptor]() - private val allMetricNames = (0 until numProcessorThreads).map { i => + private val allMetricNames = (0 until totalProcessorThreads).map { i => val tags = new util.HashMap[String, String]() tags.put("networkProcessor", i.toString) new MetricName("io-wait-ratio", "socket-server-metrics", tags) @@ -83,32 +101,25 @@ class SocketServer(val brokerId: Int, def startup() { val quotas = new ConnectionQuotas(maxConnectionsPerIp, maxConnectionsPerIpOverrides) - newGauge("NetworkProcessorAvgIdlePercent", - new Gauge[Double] { - def value = allMetricNames.map( metricName => - metrics.metrics().get(metricName).value()).sum / numProcessorThreads - } - ) - - - - // start accepting connections - // right now we will use the same processors for all ports, since we didn't implement different protocols - // in the future, we may implement different processors for SSL and Kerberos - this.synchronized { + var processorIndex = 0 endpoints.values.foreach(endpoint => { - val acceptor = new Acceptor(endpoint.host, endpoint.port, sendBufferSize, recvBufferSize, requestChannel, quotas, endpoint.protocolType, - portToProtocol, channelConfigs, numProcessorThreads, maxQueuedRequests, maxRequestSize, connectionsMaxIdleMs, metrics, time, brokerId) + 
val acceptor = new Acceptor(endpoint.host, endpoint.port, sendBufferSize, recvBufferSize, requestChannel, processors, quotas, endpoint.protocolType, + portToProtocol, channelConfigs, numProcessorThreads + processorIndex, maxQueuedRequests, maxRequestSize, connectionsMaxIdleMs, new Metrics(metricConfig, reporters, time), + allMetricNames, time, config.brokerId, processorIndex) acceptors.put(endpoint, acceptor) Utils.newThread("kafka-socket-acceptor-%s-%d".format(endpoint.protocolType.toString, endpoint.port), acceptor, false).start() acceptor.awaitStartup + processorIndex += numProcessorThreads }) } info("Started " + acceptors.size + " acceptor threads") } + // register the processor threads for notification of responses + requestChannel.addResponseListener((id:Int) => processors(id).wakeup()) + /** * Shutdown the socket server */ @@ -116,6 +127,7 @@ class SocketServer(val brokerId: Int, info("Shutting down") this.synchronized { acceptors.values.foreach(_.shutdown) + processors.foreach(_.shutdown) } info("Shutdown completed") } @@ -200,6 +212,7 @@ private[kafka] class Acceptor(val host: String, val sendBufferSize: Int, val recvBufferSize: Int, requestChannel: RequestChannel, + processors: Array[Processor], connectionQuotas: ConnectionQuotas, protocol: SecurityProtocol, portToProtocol: ConcurrentHashMap[Int, SecurityProtocol], @@ -209,14 +222,25 @@ private[kafka] class Acceptor(val host: String, maxRequestSize: Int, connectionsMaxIdleMs: Long, metrics: Metrics, + allMetricNames: Seq[MetricName], time: Time, - brokerId: Int) extends AbstractServerThread(connectionQuotas) { + brokerId: Int, + processorIndex: Int) extends AbstractServerThread(connectionQuotas) with KafkaMetricsGroup { val nioSelector = java.nio.channels.Selector.open() val serverChannel = openServerSocket(host, port) - private val processors = new Array[Processor](numProcessorThreads) + portToProtocol.put(serverChannel.socket().getLocalPort, protocol) + + newGauge("NetworkProcessorAvgIdlePercent", + new Gauge[Double] { + def value = allMetricNames.map( metricName => + metrics.metrics().get(metricName).value()).sum / numProcessorThreads + } + ) + + println("processorIndex " + processorIndex + " numProcessorThreads " + numProcessorThreads) this.synchronized { - for (i <- 0 until numProcessorThreads) { + for (i <- processorIndex until numProcessorThreads) { processors(i) = new Processor(i, time, maxRequestSize, @@ -229,19 +253,16 @@ private[kafka] class Acceptor(val host: String, metrics ) Utils.newThread("kafka-network-thread-%d-%s-%d".format(brokerId, protocol.name, i), processors(i), false).start() - } + } } - // register the processor threads for notification of responses - requestChannel.addResponseListener((id:Int) => processors(id).wakeup()) - /** * Accept loop that checks for new connection attempts */ def run() { serverChannel.register(nioSelector, SelectionKey.OP_ACCEPT); startupComplete() - var currentProcessor = 0 + var currentProcessor = processorIndex while(isRunning) { val ready = nioSelector.select(500) if(ready > 0) { @@ -258,7 +279,9 @@ private[kafka] class Acceptor(val host: String, throw new IllegalStateException("Unrecognized key state for acceptor thread.") // round robin to the next processor thread - currentProcessor = (currentProcessor + 1) % processors.length + currentProcessor = (currentProcessor + 1) % numProcessorThreads + if (currentProcessor < processorIndex) currentProcessor = processorIndex + println("current Processor " + currentProcessor + " protocol " + protocol) } catch { case e: Throwable => 
error("Error while accepting connection", e) } @@ -325,12 +348,6 @@ private[kafka] class Acceptor(val host: String, @Override def wakeup = nioSelector.wakeup() - - override def shutdown() = { - processors.foreach(_.shutdown) - super.shutdown - } - } /** @@ -397,7 +414,7 @@ private[kafka] class Processor(val id: Int, } collection.JavaConversions.collectionAsScalaIterable(selector.completedReceives).foreach( receive => { try { - val req = RequestChannel.Request(processor = id, connectionId = receive.source, buffer = receive.payload, startTimeMs = time.milliseconds, securityProtocol = SecurityProtocol.PLAINTEXT) + val req = RequestChannel.Request(processor = id, connectionId = receive.source, buffer = receive.payload, startTimeMs = time.milliseconds, securityProtocol = protocol) requestChannel.sendRequest(req) } catch { case e @ (_: InvalidRequestException | _: SchemaException) => { @@ -436,7 +453,7 @@ private[kafka] class Processor(val id: Int, selector.unmute(curr.request.connectionId) } case RequestChannel.SendAction => { - trace("Socket server received response to send, registering for write and sending data: " + curr) + println("Socket server received response to send, registering for write and sending data: " + protocol + " id " + id) selector.send(curr.responseSend) inflightResponses += (curr.request.connectionId -> curr) } diff --git a/core/src/main/scala/kafka/server/KafkaConfig.scala b/core/src/main/scala/kafka/server/KafkaConfig.scala index 9d83921..ebb3555 100755 --- a/core/src/main/scala/kafka/server/KafkaConfig.scala +++ b/core/src/main/scala/kafka/server/KafkaConfig.scala @@ -954,10 +954,12 @@ class KafkaConfig (/** ********* Zookeeper Configuration ***********/ } catch { case e: Exception => throw new IllegalArgumentException("Error creating broker listeners from '%s': %s".format(listeners, e.getMessage)) } - val distinctPorts = endpoints.map(ep => ep.port).distinct + // filter port 0 for unit tests + val endpointsWithoutZeroPort = endpoints.map(ep => ep.port).filter(_ != 0) + val distinctPorts = endpointsWithoutZeroPort.distinct val distinctProtocols = endpoints.map(ep => ep.protocolType).distinct - require(distinctPorts.size == endpoints.size, "Each listener must have a different port") + require(distinctPorts.size == endpointsWithoutZeroPort.size, "Each listener must have a different port") require(distinctProtocols.size == endpoints.size, "Each listener must have a different protocol") } diff --git a/core/src/main/scala/kafka/server/KafkaServer.scala b/core/src/main/scala/kafka/server/KafkaServer.scala index 898cbf6..4977ba6 100755 --- a/core/src/main/scala/kafka/server/KafkaServer.scala +++ b/core/src/main/scala/kafka/server/KafkaServer.scala @@ -28,6 +28,7 @@ import java.io.File import kafka.utils._ import org.apache.kafka.common.metrics._ import org.apache.kafka.common.network.NetworkReceive +import org.apache.kafka.common.protocol.SecurityProtocol import scala.collection.{JavaConversions, mutable} import org.I0Itec.zkclient.ZkClient @@ -51,19 +52,6 @@ class KafkaServer(val config: KafkaConfig, time: Time = SystemTime) extends Logg private var shutdownLatch = new CountDownLatch(1) - private val metricConfig: MetricConfig = new MetricConfig() - .samples(config.metricNumSamples) - .timeWindow(config.metricSampleWindowMs, TimeUnit.MILLISECONDS) - private val jmxPrefix: String = "kafka.server" - private val reporters: java.util.List[MetricsReporter] = config.metricReporterClasses - reporters.add(new JmxReporter(jmxPrefix)) - - - - // This exists so SocketServer (which uses 
Client libraries) can use the client Time objects without having to convert all of Kafka to use them - // Once we get rid of kafka.utils.time, we can get rid of this too - private val socketServerTime: org.apache.kafka.common.utils.Time = new org.apache.kafka.common.utils.SystemTime() - val brokerState: BrokerState = new BrokerState var apis: KafkaApis = null @@ -87,8 +75,6 @@ class KafkaServer(val config: KafkaConfig, time: Time = SystemTime) extends Logg var kafkaHealthcheck: KafkaHealthcheck = null val metadataCache: MetadataCache = new MetadataCache(config.brokerId) - - var zkClient: ZkClient = null val correlationId: AtomicInteger = new AtomicInteger(0) val brokerMetaPropsFile = "meta.properties" @@ -133,68 +119,54 @@ class KafkaServer(val config: KafkaConfig, time: Time = SystemTime) extends Logg config.brokerId = getBrokerId this.logIdent = "[Kafka Server " + config.brokerId + "], " - val metrics = new Metrics(metricConfig, reporters, socketServerTime) - val channelConfigs = config.channelConfigs - - socketServer = new SocketServer(config.brokerId, - config.listeners, - config.numNetworkThreads, - config.queuedMaxRequests, - config.socketSendBufferBytes, - config.socketReceiveBufferBytes, - config.socketRequestMaxBytes, - config.maxConnectionsPerIp, - config.connectionsMaxIdleMs, - config.maxConnectionsPerIpOverrides, - channelConfigs, - socketServerTime, - metrics) - socketServer.startup() - - /* start replica manager */ - replicaManager = new ReplicaManager(config, time, zkClient, kafkaScheduler, logManager, isShuttingDown) - replicaManager.startup() - - /* start offset manager */ - offsetManager = createOffsetManager() - - /* start kafka controller */ - kafkaController = new KafkaController(config, zkClient, brokerState) - kafkaController.startup() - - /* start kafka coordinator */ - consumerCoordinator = new ConsumerCoordinator(config, zkClient, offsetManager) - consumerCoordinator.startup() - - /* start processing requests */ - apis = new KafkaApis(socketServer.requestChannel, replicaManager, offsetManager, consumerCoordinator, - kafkaController, zkClient, config.brokerId, config, metadataCache) - requestHandlerPool = new KafkaRequestHandlerPool(config.brokerId, socketServer.requestChannel, apis, config.numIoThreads) - brokerState.newState(RunningAsBroker) - - Mx4jLoader.maybeLoad() - - /* start topic config manager */ - topicConfigManager = new TopicConfigManager(zkClient, logManager) - topicConfigManager.startup() - - /* tell everyone we are alive */ - val listeners = config.advertisedListeners.map {case(protocol, endpoint) => - if (endpoint.port == 0) - (protocol, EndPoint(endpoint.host, socketServer.boundPort(), endpoint.protocolType)) - else - (protocol, endpoint) - } - kafkaHealthcheck = new KafkaHealthcheck(config.brokerId, listeners, config.zkSessionTimeoutMs, zkClient) - kafkaHealthcheck.startup() - /* register broker metrics */ - registerStats() + socketServer = new SocketServer(config) + socketServer.startup() + + /* start replica manager */ + replicaManager = new ReplicaManager(config, time, zkClient, kafkaScheduler, logManager, isShuttingDown) + replicaManager.startup() + + /* start offset manager */ + offsetManager = createOffsetManager() - shutdownLatch = new CountDownLatch(1) - startupComplete.set(true) - isStartingUp.set(false) - info("started") + /* start kafka controller */ + kafkaController = new KafkaController(config, zkClient, brokerState) + kafkaController.startup() + + /* start kafka coordinator */ + consumerCoordinator = new ConsumerCoordinator(config, 
zkClient, offsetManager) + consumerCoordinator.startup() + + /* start processing requests */ + apis = new KafkaApis(socketServer.requestChannel, replicaManager, offsetManager, consumerCoordinator, + kafkaController, zkClient, config.brokerId, config, metadataCache) + requestHandlerPool = new KafkaRequestHandlerPool(config.brokerId, socketServer.requestChannel, apis, config.numIoThreads) + brokerState.newState(RunningAsBroker) + + Mx4jLoader.maybeLoad() + + /* start topic config manager */ + topicConfigManager = new TopicConfigManager(zkClient, logManager) + topicConfigManager.startup() + + /* tell everyone we are alive */ + val listeners = config.advertisedListeners.map {case(protocol, endpoint) => + if (endpoint.port == 0) + (protocol, EndPoint(endpoint.host, socketServer.boundPort(), endpoint.protocolType)) + else + (protocol, endpoint) + } + kafkaHealthcheck = new KafkaHealthcheck(config.brokerId, listeners, config.zkSessionTimeoutMs, zkClient) + kafkaHealthcheck.startup() + + /* register broker metrics */ + registerStats() + + shutdownLatch = new CountDownLatch(1) + startupComplete.set(true) + isStartingUp.set(false) + info("started") } } catch { @@ -386,7 +358,7 @@ class KafkaServer(val config: KafkaConfig, time: Time = SystemTime) extends Logg def getLogManager(): LogManager = logManager - def boundPort(): Int = socketServer.boundPort() + def boundPort(protocol: SecurityProtocol = SecurityProtocol.PLAINTEXT): Int = socketServer.boundPort(protocol) private def createLogManager(zkClient: ZkClient, brokerState: BrokerState): LogManager = { val defaultLogConfig = LogConfig(segmentSize = config.logSegmentBytes, diff --git a/core/src/test/scala/integration/kafka/api/ProducerSendTest.scala b/core/src/test/scala/integration/kafka/api/ProducerSendTest.scala index 9ce4bd5..8a8216d 100644 --- a/core/src/test/scala/integration/kafka/api/ProducerSendTest.scala +++ b/core/src/test/scala/integration/kafka/api/ProducerSendTest.scala @@ -74,7 +74,7 @@ class ProducerSendTest extends JUnit3Suite with KafkaServerTestHarness { def testSendOffset() { var producer = TestUtils.createNewProducer(brokerList) val partition = new Integer(0) - + object callback extends Callback { var offset = 0L def onCompletion(metadata: RecordMetadata, exception: Exception) { @@ -298,7 +298,7 @@ class ProducerSendTest extends JUnit3Suite with KafkaServerTestHarness { } } } - + /** * Test that flush immediately sends all accumulated requests. */ diff --git a/core/src/test/scala/integration/kafka/api/SSLProducerSendTest.scala b/core/src/test/scala/integration/kafka/api/SSLProducerSendTest.scala new file mode 100644 index 0000000..2208cca --- /dev/null +++ b/core/src/test/scala/integration/kafka/api/SSLProducerSendTest.scala @@ -0,0 +1,108 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package kafka.api + +import java.util.Properties +import java.util.concurrent.TimeUnit +import java.io.File + +import kafka.consumer.SimpleConsumer +import kafka.integration.KafkaServerTestHarness +import kafka.message.Message +import kafka.server.KafkaConfig +import kafka.utils.TestUtils +import org.apache.kafka.clients.producer._ +import org.apache.kafka.common.config.ConfigException +import org.apache.kafka.common.errors.SerializationException +import org.apache.kafka.common.serialization.ByteArraySerializer +import org.junit.Assert._ +import org.junit.Test +import org.scalatest.junit.JUnit3Suite + + +class SSLProducerSendTest extends JUnit3Suite with KafkaServerTestHarness { + val numServers = 1 + val trustStoreFile = File.createTempFile("truststore", ".jks") + val overridingProps = new Properties() + overridingProps.put(KafkaConfig.NumPartitionsProp, 4.toString) + def generateConfigs() = + TestUtils.createBrokerConfigs(numServers, zkConnect, false, enableSSL=true, trustStoreFile=Some(trustStoreFile)).map(KafkaConfig.fromProps(_, overridingProps)) + + private var consumer1: SimpleConsumer = null + private var consumer2: SimpleConsumer = null + + private val topic = "topic" + private val numRecords = 100 + + override def setUp() { + super.setUp() + + // TODO: we need to migrate to new consumers when 0.9 is final + consumer1 = new SimpleConsumer("localhost", servers(0).boundPort(), 100, 1024*1024, "") + consumer2 = new SimpleConsumer("localhost", servers(0).boundPort(), 100, 1024*1024, "") + + } + + override def tearDown() { + consumer1.close() + consumer2.close() + super.tearDown() + } + + /** + * testSendOffset checks the basic send API behavior + * + * 1. Send with null key/value/partition-id should be accepted; send with null topic should be rejected. + * 2. 
Last message of the non-blocking send should return the correct offset metadata + */ + @Test + def testSendOffset() { + var producer = TestUtils.createNewProducer(TestUtils.getSSLBrokerListStrFromServers(servers), enableSSL=true, trustStoreFile=Some(trustStoreFile)) + val partition = new Integer(0) + + object callback extends Callback { + var offset = 0L + def onCompletion(metadata: RecordMetadata, exception: Exception) { + if (exception == null) { + assertEquals(offset, metadata.offset()) + assertEquals(topic, metadata.topic()) + assertEquals(partition, metadata.partition()) + offset += 1 + } else { + fail("Send callback returns the following exception", exception) + } + } + } + + try { + // create topic + TestUtils.createTopic(zkClient, topic, 1, 1, servers) + + // send a normal record + val record0 = new ProducerRecord[Array[Byte],Array[Byte]](topic, partition, "key".getBytes, "value".getBytes) + assertEquals("Should have offset 0", 0L, producer.send(record0, callback).get.offset) + + + } finally { + if (producer != null) { + producer.close() + producer = null + } + } + } +} diff --git a/core/src/test/scala/unit/kafka/admin/AddPartitionsTest.scala b/core/src/test/scala/unit/kafka/admin/AddPartitionsTest.scala index df5c6ba..76aa9af 100755 --- a/core/src/test/scala/unit/kafka/admin/AddPartitionsTest.scala +++ b/core/src/test/scala/unit/kafka/admin/AddPartitionsTest.scala @@ -45,7 +45,7 @@ class AddPartitionsTest extends JUnit3Suite with ZooKeeperTestHarness { configs = (0 until 4).map(i => KafkaConfig.fromProps(TestUtils.createBrokerConfig(i, zkConnect, enableControlledShutdown = false))) // start all the servers servers = configs.map(c => TestUtils.createServer(c)) - brokers = servers.map(s => new Broker(s.config.brokerId, s.config.hostName, s.boundPort)) + brokers = servers.map(s => new Broker(s.config.brokerId, s.config.hostName, s.boundPort())) // create topics first createTopic(zkClient, topic1, partitionReplicaAssignment = Map(0->Seq(0,1)), servers = servers) diff --git a/core/src/test/scala/unit/kafka/network/SocketServerTest.scala b/core/src/test/scala/unit/kafka/network/SocketServerTest.scala new file mode 100644 index 0000000..6fd4a4f --- /dev/null +++ b/core/src/test/scala/unit/kafka/network/SocketServerTest.scala @@ -0,0 +1,211 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package kafka.network; + +import java.net._ +import javax.net.ssl._ +import java.io._ +import kafka.cluster.EndPoint +import org.apache.kafka.common.metrics.Metrics +import org.apache.kafka.common.network.NetworkSend +import org.apache.kafka.common.protocol.SecurityProtocol +import org.apache.kafka.common.utils.SystemTime +import org.junit._ +import org.scalatest.junit.JUnitSuite +import java.util.Random +import junit.framework.Assert._ +import kafka.producer.SyncProducerConfig +import kafka.api.ProducerRequest +import java.nio.ByteBuffer +import kafka.common.TopicAndPartition +import kafka.message.ByteBufferMessageSet +import kafka.server.KafkaConfig +import java.nio.channels.SelectionKey +import kafka.utils.TestUtils +import scala.collection.Map + +class SocketServerTest extends JUnitSuite { + val props = TestUtils.createBrokerConfig(0, TestUtils.MockZkConnect, port = 0) + props.put("listeners", "PLAINTEXT://localhost:0,TRACE://localhost:0") + props.put("num.network.threads", "1") + props.put("socket.send.buffer.bytes", "300000") + props.put("socket.receive.buffer.bytes", "300000") + props.put("queued.max.requests", "50") + props.put("socket.request.max.bytes", "50") + props.put("max.connections.per.ip", "5") + props.put("connections.max.idle.ms", "60000") + val config: KafkaConfig = KafkaConfig.fromProps(props) + val server: SocketServer = new SocketServer(config) + server.startup() + + def sendRequest(socket: Socket, id: Short, request: Array[Byte]) { + val outgoing = new DataOutputStream(socket.getOutputStream) + outgoing.writeInt(request.length + 2) + outgoing.writeShort(id) + outgoing.write(request) + outgoing.flush() + } + + def receiveResponse(socket: Socket): Array[Byte] = { + val incoming = new DataInputStream(socket.getInputStream) + val len = incoming.readInt() + val response = new Array[Byte](len) + incoming.readFully(response) + response + } + + /* A simple request handler that just echos back the response */ + def processRequest(channel: RequestChannel) { + val request = channel.receiveRequest + val byteBuffer = ByteBuffer.allocate(request.requestObj.sizeInBytes) + request.requestObj.writeTo(byteBuffer) + byteBuffer.rewind() + val send = new NetworkSend(request.connectionId, byteBuffer) + channel.sendResponse(new RequestChannel.Response(request.processor, request, send)) + } + + def connect(s:SocketServer = server, protocol: SecurityProtocol = SecurityProtocol.PLAINTEXT) = { + new Socket("localhost", server.boundPort(protocol)) + } + + + @After + def cleanup() { + server.shutdown() + } + + @Test + def simpleRequest() { + val plainSocket = connect(protocol = SecurityProtocol.PLAINTEXT) + val traceSocket = connect(protocol = SecurityProtocol.TRACE) + val correlationId = -1 + val clientId = SyncProducerConfig.DefaultClientId + val ackTimeoutMs = SyncProducerConfig.DefaultAckTimeoutMs + val ack = SyncProducerConfig.DefaultRequiredAcks + val emptyRequest = + new ProducerRequest(correlationId, clientId, ack, ackTimeoutMs, collection.mutable.Map[TopicAndPartition, ByteBufferMessageSet]()) + + val byteBuffer = ByteBuffer.allocate(emptyRequest.sizeInBytes) + emptyRequest.writeTo(byteBuffer) + byteBuffer.rewind() + val serializedBytes = new Array[Byte](byteBuffer.remaining) + byteBuffer.get(serializedBytes) + + // Test PLAINTEXT socket + sendRequest(plainSocket, 0, serializedBytes) + processRequest(server.requestChannel) + assertEquals(serializedBytes.toSeq, receiveResponse(plainSocket).toSeq) + + // Test TRACE socket + sendRequest(traceSocket, 0, serializedBytes) + 
processRequest(server.requestChannel) + assertEquals(serializedBytes.toSeq, receiveResponse(traceSocket).toSeq) + } + + @Test + def tooBigRequestIsRejected() { + val tooManyBytes = new Array[Byte](server.maxRequestSize + 1) + new Random().nextBytes(tooManyBytes) + val socket = connect() + sendRequest(socket, 0, tooManyBytes) + try { + receiveResponse(socket) + } catch { + case e: IOException => // thats fine + } + } + + @Test + def testSocketsCloseOnShutdown() { + // open a connection + val plainSocket = connect(protocol = SecurityProtocol.PLAINTEXT) + val traceSocket = connect(protocol = SecurityProtocol.TRACE) + val bytes = new Array[Byte](40) + // send a request first to make sure the connection has been picked up by the socket server + sendRequest(plainSocket, 0, bytes) + sendRequest(traceSocket, 0, bytes) + processRequest(server.requestChannel) + + // make sure the sockets are open + server.acceptors.values.map(acceptor => assertFalse(acceptor.serverChannel.socket.isClosed)) + // then shutdown the server + server.shutdown() + + val largeChunkOfBytes = new Array[Byte](1000000) + // doing a subsequent send should throw an exception as the connection should be closed. + // send a large chunk of bytes to trigger a socket flush + try { + sendRequest(plainSocket, 0, largeChunkOfBytes) + fail("expected exception when writing to closed plain socket") + } catch { + case e: IOException => // expected + } + + try { + sendRequest(traceSocket, 0, largeChunkOfBytes) + fail("expected exception when writing to closed trace socket") + } catch { + case e: IOException => // expected + } + } + + @Test + def testMaxConnectionsPerIp() { + // make the maximum allowable number of connections and then leak them + val conns = (0 until server.maxConnectionsPerIp).map(i => connect()) + // now try one more (should fail) + val conn = connect() + conn.setSoTimeout(3000) + assertEquals(-1, conn.getInputStream().read()) + } + + @Test + def testMaxConnectionsPerIPOverrides(): Unit = { + val overrideNum = 6 + val overrides: Map[String, Int] = Map("localhost" -> overrideNum) + val overrideprops = TestUtils.createBrokerConfig(0, TestUtils.MockZkConnect, port = 0) + val overrideServer: SocketServer = new SocketServer(KafkaConfig.fromProps(overrideprops)) + overrideServer.startup() + // make the maximum allowable number of connections and then leak them + val conns = ((0 until overrideNum).map(i => connect(overrideServer))) + // now try one more (should fail) + val conn = connect(overrideServer) + conn.setSoTimeout(3000) + assertEquals(-1, conn.getInputStream.read()) + overrideServer.shutdown() + } + + @Test + def testSSLSocketServer(): Unit = { + val trustStoreFile = File.createTempFile("truststore", ".jks") + val overrideprops = TestUtils.createBrokerConfig(0, TestUtils.MockZkConnect, port = 0, enableSSL = true, trustStoreFile = Some(trustStoreFile)) + overrideprops.put("listeners", "SSL://localhost:0") + + val overrideServer: SocketServer = new SocketServer(KafkaConfig.fromProps(overrideprops)) + overrideServer.startup() + val sslContext = SSLContext.getInstance("TLSv1.2") + sslContext.init(null, Array(TestUtils.trustAllCerts), new java.security.SecureRandom()) + val socketFactory = sslContext.getSocketFactory + val socket = socketFactory.createSocket("localhost", overrideServer.boundPort(SecurityProtocol.SSL)).asInstanceOf[SSLSocket] + socket.setNeedClientAuth(false) + val bytes = new Array[Byte](40) + sendRequest(socket, 0, bytes) + processRequest(overrideServer.requestChannel) + overrideServer.shutdown() + } +} diff 
--git a/core/src/test/scala/unit/kafka/utils/TestUtils.scala b/core/src/test/scala/unit/kafka/utils/TestUtils.scala index 17e9fe4..5b95133 100755 --- a/core/src/test/scala/unit/kafka/utils/TestUtils.scala +++ b/core/src/test/scala/unit/kafka/utils/TestUtils.scala @@ -22,6 +22,8 @@ import java.nio._ import java.nio.channels._ import java.util.Random import java.util.Properties +import java.security.cert.X509Certificate +import javax.net.ssl.X509TrustManager import charset.Charset import org.apache.kafka.common.protocol.SecurityProtocol @@ -46,9 +48,14 @@ import kafka.log._ import junit.framework.AssertionFailedError import junit.framework.Assert._ import org.apache.kafka.clients.producer.KafkaProducer +import org.apache.kafka.clients.consumer.KafkaConsumer +import org.apache.kafka.clients.CommonClientConfigs +import org.apache.kafka.common.network.SSLFactory +import org.apache.kafka.common.config.SSLConfigs +import org.apache.kafka.test.TestSSLUtils import scala.collection.Map -import org.apache.kafka.clients.consumer.KafkaConsumer +import scala.collection.JavaConversions._ /** * Utility functions to help with testing @@ -133,24 +140,33 @@ object TestUtils extends Logging { def createBrokerConfigs(numConfigs: Int, zkConnect: String, enableControlledShutdown: Boolean = true, - enableDeleteTopic: Boolean = false): Seq[Properties] = { - (0 until numConfigs).map(node => createBrokerConfig(node, zkConnect, enableControlledShutdown, enableDeleteTopic)) + enableDeleteTopic: Boolean = false, + enableSSL: Boolean = false, + trustStoreFile: Option[File] = None): Seq[Properties] = { + (0 until numConfigs).map(node => createBrokerConfig(node, zkConnect, enableControlledShutdown, enableDeleteTopic, enableSSL = enableSSL, trustStoreFile = trustStoreFile)) } def getBrokerListStrFromServers(servers: Seq[KafkaServer]): String = { servers.map(s => formatAddress(s.config.hostName, s.boundPort())).mkString(",") } + def getSSLBrokerListStrFromServers(servers: Seq[KafkaServer]): String = { + servers.map(s => formatAddress(s.config.hostName, s.boundPort(SecurityProtocol.SSL))).mkString(",") + } + /** * Create a test config for the given node id */ def createBrokerConfig(nodeId: Int, zkConnect: String, enableControlledShutdown: Boolean = true, enableDeleteTopic: Boolean = false, - port: Int = RandomPort): Properties = { + port: Int = RandomPort, enableSSL: Boolean = false, trustStoreFile: Option[File] = None): Properties = { val props = new Properties + var listeners: String = "PLAINTEXT://localhost:"+port.toString if (nodeId >= 0) props.put("broker.id", nodeId.toString) - props.put("listeners", "PLAINTEXT://localhost:"+port.toString) + if (enableSSL) + listeners = listeners + "," + "SSL://localhost:"+port.toString + props.put("listeners", listeners) props.put("log.dir", TestUtils.tempDir().getAbsolutePath) props.put("zookeeper.connect", zkConnect) props.put("replica.socket.timeout.ms", "1500") @@ -158,6 +174,9 @@ object TestUtils extends Logging { props.put("controlled.shutdown.enable", enableControlledShutdown.toString) props.put("delete.topic.enable", enableDeleteTopic.toString) props.put("controlled.shutdown.retry.backoff.ms", "100") + if (enableSSL) { + props.putAll(addSSLConfigs(SSLFactory.Mode.SERVER, true, trustStoreFile, "server"+nodeId)) + } props } @@ -381,7 +400,9 @@ object TestUtils extends Logging { blockOnBufferFull: Boolean = true, bufferSize: Long = 1024L * 1024L, retries: Int = 0, - lingerMs: Long = 0) : KafkaProducer[Array[Byte],Array[Byte]] = { + lingerMs: Long = 0, + enableSSL: Boolean = 
false, + trustStoreFile: Option[File] = None) : KafkaProducer[Array[Byte],Array[Byte]] = { import org.apache.kafka.clients.producer.ProducerConfig val producerProps = new Properties() @@ -396,6 +417,10 @@ object TestUtils extends Logging { producerProps.put(ProducerConfig.LINGER_MS_CONFIG, lingerMs.toString) producerProps.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.ByteArraySerializer") producerProps.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.ByteArraySerializer") + if (enableSSL) { + producerProps.put(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG, "SSL") + producerProps.putAll(addSSLConfigs(SSLFactory.Mode.CLIENT, false, trustStoreFile, "producer")) + } new KafkaProducer[Array[Byte],Array[Byte]](producerProps) } @@ -873,6 +898,37 @@ object TestUtils extends Logging { new String(bytes, encoding) } + def addSSLConfigs(mode: SSLFactory.Mode, clientCert: Boolean, trustStoreFile: Option[File], certAlias: String): Properties = { + var sslConfigs: java.util.Map[String, Object] = new java.util.HashMap[String, Object]() + if (!trustStoreFile.isDefined) { + throw new Exception("enableSSL set to true but no trustStoreFile provided") + } + if (mode == SSLFactory.Mode.SERVER) + sslConfigs = TestSSLUtils.createSSLConfig(true, true, mode, trustStoreFile.get, certAlias) + else + sslConfigs = TestSSLUtils.createSSLConfig(false, false, mode, trustStoreFile.get, certAlias) + + val sslProps = new Properties() + sslConfigs.foreach(kv => + sslProps.put(kv._1, kv._2) + ) + sslProps + } + + // a X509TrustManager to trust self-signed certs for unit tests. + def trustAllCerts: X509TrustManager = { + val trustManager = new X509TrustManager() { + override def getAcceptedIssuers: Array[X509Certificate] = { + null + } + override def checkClientTrusted(certs: Array[X509Certificate], authType: String) { + } + override def checkServerTrusted(certs: Array[X509Certificate], authType: String) { + } + } + trustManager + } + } class IntEncoder(props: VerifiableProperties = null) extends Encoder[Int] { -- 2.4.6 From eda92cb5f9d2ae749903eac5453a6fdb49685964 Mon Sep 17 00:00:00 2001 From: Sriharsha Chintalapani Date: Sat, 20 Jun 2015 20:01:30 -0700 Subject: [PATCH 16/30] KAFKA-1690. Added SSLProducerSendTest and fixes to get right port for SSL. 
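For reference, a minimal sketch of the client-side flow this patch makes testable, using only helpers introduced elsewhere in this series (TestUtils.getSSLBrokerListStrFromServers, the enableSSL/trustStoreFile parameters of TestUtils.createNewProducer, and SocketServer.boundPort(SecurityProtocol.SSL) underneath). It assumes the surroundings of a KafkaServerTestHarness test such as SSLProducerSendTest (servers, zkClient, trustStoreFile); the topic name and record contents are placeholders, and the snippet is illustrative rather than part of the patch:

    // resolve the SSL listener ports actually bound by the brokers (tests bind port 0)
    val sslBrokerList = TestUtils.getSSLBrokerListStrFromServers(servers)
    // the producer negotiates SSL against the shared test truststore
    val producer = TestUtils.createNewProducer(sslBrokerList,
      enableSSL = true, trustStoreFile = Some(trustStoreFile))
    try {
      TestUtils.createTopic(zkClient, "ssl-send-test", 1, 1, servers)
      val record = new ProducerRecord[Array[Byte], Array[Byte]]("ssl-send-test",
        new Integer(0), "key".getBytes, "value".getBytes)
      // offset 0 on the first send confirms the request travelled over the SSL endpoint
      assertEquals(0L, producer.send(record).get.offset)
    } finally {
      producer.close()
    }
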
--- .../common/network/PlainTextTransportLayer.java | 1 - .../main/scala/kafka/network/SocketServer.scala | 4 +- core/src/main/scala/kafka/server/KafkaServer.scala | 2 +- .../kafka/api/SSLProducerSendTest.scala | 326 ++++++++++++++++++++- .../test/scala/unit/kafka/utils/TestUtils.scala | 4 +- 5 files changed, 327 insertions(+), 10 deletions(-) diff --git a/clients/src/main/java/org/apache/kafka/common/network/PlainTextTransportLayer.java b/clients/src/main/java/org/apache/kafka/common/network/PlainTextTransportLayer.java index 2390f03..1209670 100644 --- a/clients/src/main/java/org/apache/kafka/common/network/PlainTextTransportLayer.java +++ b/clients/src/main/java/org/apache/kafka/common/network/PlainTextTransportLayer.java @@ -95,7 +95,6 @@ public class PlainTextTransportLayer implements TransportLayer { */ public int read(ByteBuffer dst) throws IOException { - System.out.println("in read " + dst.remaining()); return socketChannel.read(dst); } diff --git a/core/src/main/scala/kafka/network/SocketServer.scala b/core/src/main/scala/kafka/network/SocketServer.scala index a09ed84..cb125c2 100644 --- a/core/src/main/scala/kafka/network/SocketServer.scala +++ b/core/src/main/scala/kafka/network/SocketServer.scala @@ -238,7 +238,6 @@ private[kafka] class Acceptor(val host: String, } ) - println("processorIndex " + processorIndex + " numProcessorThreads " + numProcessorThreads) this.synchronized { for (i <- processorIndex until numProcessorThreads) { processors(i) = new Processor(i, @@ -281,7 +280,6 @@ private[kafka] class Acceptor(val host: String, // round robin to the next processor thread currentProcessor = (currentProcessor + 1) % numProcessorThreads if (currentProcessor < processorIndex) currentProcessor = processorIndex - println("current Processor " + currentProcessor + " protocol " + protocol) } catch { case e: Throwable => error("Error while accepting connection", e) } @@ -453,7 +451,7 @@ private[kafka] class Processor(val id: Int, selector.unmute(curr.request.connectionId) } case RequestChannel.SendAction => { - println("Socket server received response to send, registering for write and sending data: " + protocol + " id " + id) + trace("Socket server received response to send, registering for write and sending data: " + curr) selector.send(curr.responseSend) inflightResponses += (curr.request.connectionId -> curr) } diff --git a/core/src/main/scala/kafka/server/KafkaServer.scala b/core/src/main/scala/kafka/server/KafkaServer.scala index 4977ba6..1f2f59e 100755 --- a/core/src/main/scala/kafka/server/KafkaServer.scala +++ b/core/src/main/scala/kafka/server/KafkaServer.scala @@ -153,7 +153,7 @@ class KafkaServer(val config: KafkaConfig, time: Time = SystemTime) extends Logg /* tell everyone we are alive */ val listeners = config.advertisedListeners.map {case(protocol, endpoint) => if (endpoint.port == 0) - (protocol, EndPoint(endpoint.host, socketServer.boundPort(), endpoint.protocolType)) + (protocol, EndPoint(endpoint.host, socketServer.boundPort(protocol), endpoint.protocolType)) else (protocol, endpoint) } diff --git a/core/src/test/scala/integration/kafka/api/SSLProducerSendTest.scala b/core/src/test/scala/integration/kafka/api/SSLProducerSendTest.scala index 2208cca..73e2d2c 100644 --- a/core/src/test/scala/integration/kafka/api/SSLProducerSendTest.scala +++ b/core/src/test/scala/integration/kafka/api/SSLProducerSendTest.scala @@ -36,7 +36,7 @@ import org.scalatest.junit.JUnit3Suite class SSLProducerSendTest extends JUnit3Suite with KafkaServerTestHarness { - val numServers = 1 + 
val numServers = 2 val trustStoreFile = File.createTempFile("truststore", ".jks") val overridingProps = new Properties() overridingProps.put(KafkaConfig.NumPartitionsProp, 4.toString) @@ -54,7 +54,7 @@ class SSLProducerSendTest extends JUnit3Suite with KafkaServerTestHarness { // TODO: we need to migrate to new consumers when 0.9 is final consumer1 = new SimpleConsumer("localhost", servers(0).boundPort(), 100, 1024*1024, "") - consumer2 = new SimpleConsumer("localhost", servers(0).boundPort(), 100, 1024*1024, "") + consumer2 = new SimpleConsumer("localhost", servers(1).boundPort(), 100, 1024*1024, "") } @@ -91,12 +91,206 @@ class SSLProducerSendTest extends JUnit3Suite with KafkaServerTestHarness { try { // create topic - TestUtils.createTopic(zkClient, topic, 1, 1, servers) + TestUtils.createTopic(zkClient, topic, 1, 2, servers) // send a normal record val record0 = new ProducerRecord[Array[Byte],Array[Byte]](topic, partition, "key".getBytes, "value".getBytes) assertEquals("Should have offset 0", 0L, producer.send(record0, callback).get.offset) + // send a record with null value should be ok + val record1 = new ProducerRecord[Array[Byte],Array[Byte]](topic, partition, "key".getBytes, null) + assertEquals("Should have offset 1", 1L, producer.send(record1, callback).get.offset) + + // send a record with null key should be ok + val record2 = new ProducerRecord[Array[Byte],Array[Byte]](topic, partition, null, "value".getBytes) + assertEquals("Should have offset 2", 2L, producer.send(record2, callback).get.offset) + + // send a record with null part id should be ok + val record3 = new ProducerRecord[Array[Byte],Array[Byte]](topic, null, "key".getBytes, "value".getBytes) + assertEquals("Should have offset 3", 3L, producer.send(record3, callback).get.offset) + + + // send a record with null topic should fail + try { + val record4 = new ProducerRecord[Array[Byte],Array[Byte]](null, partition, "key".getBytes, "value".getBytes) + producer.send(record4, callback) + fail("Should not allow sending a record without topic") + } catch { + case iae: IllegalArgumentException => // this is ok + case e: Throwable => fail("Only expecting IllegalArgumentException", e) + } + + // // non-blocking send a list of records + // for (i <- 1 to numRecords) + // producer.send(record0, callback) + + // // check that all messages have been acked via offset + // assertEquals("Should have offset " + (numRecords + 4), numRecords + 4L, producer.send(record0, callback).get.offset) + + } finally { + if (producer != null) { + producer.close() + producer = null + } + } + } + + @Test + def testSerializer() { + // send a record with a wrong type should receive a serialization exception + try { + val producer = createNewProducerWithWrongSerializer(brokerList) + val record5 = new ProducerRecord[Array[Byte],Array[Byte]](topic, new Integer(0), "key".getBytes, "value".getBytes) + producer.send(record5) + fail("Should have gotten a SerializationException") + } catch { + case se: SerializationException => // this is ok + } + + try { + createNewProducerWithNoSerializer(brokerList) + fail("Instantiating a producer without specifying a serializer should cause a ConfigException") + } catch { + case ce : ConfigException => // this is ok + } + + // create a producer with explicit serializers should succeed + createNewProducerWithExplicitSerializer(brokerList) + } + + private def createNewProducerWithWrongSerializer(brokerList: String) : KafkaProducer[Array[Byte],Array[Byte]] = { + import org.apache.kafka.clients.producer.ProducerConfig + + val 
producerProps = new Properties() + producerProps.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, brokerList) + producerProps.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringSerializer") + producerProps.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringSerializer") + return new KafkaProducer[Array[Byte],Array[Byte]](producerProps) + } + + private def createNewProducerWithNoSerializer(brokerList: String) : KafkaProducer[Array[Byte],Array[Byte]] = { + import org.apache.kafka.clients.producer.ProducerConfig + + val producerProps = new Properties() + producerProps.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, brokerList) + return new KafkaProducer[Array[Byte],Array[Byte]](producerProps) + } + + private def createNewProducerWithExplicitSerializer(brokerList: String) : KafkaProducer[Array[Byte],Array[Byte]] = { + import org.apache.kafka.clients.producer.ProducerConfig + + val producerProps = new Properties() + producerProps.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, brokerList) + return new KafkaProducer[Array[Byte],Array[Byte]](producerProps, new ByteArraySerializer, new ByteArraySerializer) + } + + /** + * testClose checks the closing behavior + * + * After close() returns, all messages should be sent with correct returned offset metadata + */ + @Test + def testClose() { + var producer = TestUtils.createNewProducer(brokerList) + + try { + // create topic + TestUtils.createTopic(zkClient, topic, 1, 2, servers) + + // non-blocking send a list of records + val record0 = new ProducerRecord[Array[Byte],Array[Byte]](topic, null, "key".getBytes, "value".getBytes) + for (i <- 1 to numRecords) + producer.send(record0) + val response0 = producer.send(record0) + + // close the producer + producer.close() + producer = null + + // check that all messages have been acked via offset, + // this also checks that messages with same key go to the same partition + assertTrue("The last message should be acked before producer is shutdown", response0.isDone) + assertEquals("Should have offset " + numRecords, numRecords.toLong, response0.get.offset) + + } finally { + if (producer != null) { + producer.close() + producer = null + } + } + } + + /** + * testSendToPartition checks the partitioning behavior + * + * The specified partition-id should be respected + */ + @Test + def testSendToPartition() { + var producer = TestUtils.createNewProducer(brokerList) + + try { + // create topic + val leaders = TestUtils.createTopic(zkClient, topic, 2, 2, servers) + val partition = 1 + + // make sure leaders exist + val leader1 = leaders(partition) + assertTrue("Leader for topic \"topic\" partition 1 should exist", leader1.isDefined) + + val responses = + for (i <- 1 to numRecords) + yield producer.send(new ProducerRecord[Array[Byte],Array[Byte]](topic, partition, null, ("value" + i).getBytes)) + val futures = responses.toList + futures.map(_.get) + for (future <- futures) + assertTrue("Request should have completed", future.isDone) + + // make sure all of them end up in the same partition with increasing offset values + for ((future, offset) <- futures zip (0 until numRecords)) { + assertEquals(offset.toLong, future.get.offset) + assertEquals(topic, future.get.topic) + assertEquals(partition, future.get.partition) + } + + // make sure the fetched messages also respect the partitioning and ordering + val fetchResponse1 = if(leader1.get == configs(0).brokerId) { + consumer1.fetch(new FetchRequestBuilder().addFetch(topic, partition, 0, 
Int.MaxValue).build()) + } else { + consumer2.fetch(new FetchRequestBuilder().addFetch(topic, partition, 0, Int.MaxValue).build()) + } + val messageSet1 = fetchResponse1.messageSet(topic, partition).iterator.toBuffer + assertEquals("Should have fetched " + numRecords + " messages", numRecords, messageSet1.size) + + // TODO: also check topic and partition after they are added in the return messageSet + for (i <- 0 to numRecords - 1) { + assertEquals(new Message(bytes = ("value" + (i + 1)).getBytes), messageSet1(i).message) + assertEquals(i.toLong, messageSet1(i).offset) + } + } finally { + if (producer != null) { + producer.close() + producer = null + } + } + } + + /** + * testAutoCreateTopic + * + * The topic should be created upon sending the first message + */ + @Test + def testAutoCreateTopic() { + var producer = TestUtils.createNewProducer(brokerList, retries = 5) + + try { + // Send a message to auto-create the topic + val record = new ProducerRecord[Array[Byte],Array[Byte]](topic, null, "key".getBytes, "value".getBytes) + assertEquals("Should have offset 0", 0L, producer.send(record).get.offset) + + // double check that the topic is created with leader elected + TestUtils.waitUntilLeaderIsElectedOrChanged(zkClient, topic, 0) } finally { if (producer != null) { @@ -105,4 +299,130 @@ class SSLProducerSendTest extends JUnit3Suite with KafkaServerTestHarness { } } } + + /** + * Test that flush immediately sends all accumulated requests. + */ + @Test + def testFlush() { + var producer = TestUtils.createNewProducer(brokerList, lingerMs = Long.MaxValue) + try { + TestUtils.createTopic(zkClient, topic, 2, 2, servers) + val record = new ProducerRecord[Array[Byte], Array[Byte]](topic, "value".getBytes) + for(i <- 0 until 50) { + val responses = (0 until numRecords) map (i => producer.send(record)) + assertTrue("No request is complete.", responses.forall(!_.isDone())) + producer.flush() + assertTrue("All requests are complete.", responses.forall(_.isDone())) + } + } finally { + if (producer != null) + producer.close() + } + } + + /** + * Test close with zero timeout from caller thread + */ + @Test + def testCloseWithZeroTimeoutFromCallerThread() { + var producer: KafkaProducer[Array[Byte],Array[Byte]] = null + try { + // create topic + val leaders = TestUtils.createTopic(zkClient, topic, 2, 2, servers) + val leader0 = leaders(0) + val leader1 = leaders(1) + + // create record + val record0 = new ProducerRecord[Array[Byte], Array[Byte]](topic, 0, null, "value".getBytes) + val record1 = new ProducerRecord[Array[Byte], Array[Byte]](topic, 1, null, "value".getBytes) + + // Test closing from caller thread. 
+ for(i <- 0 until 50) { + producer = TestUtils.createNewProducer(brokerList, lingerMs = Long.MaxValue) + val responses = (0 until numRecords) map (i => producer.send(record0)) + assertTrue("No request is complete.", responses.forall(!_.isDone())) + producer.close(0, TimeUnit.MILLISECONDS) + responses.foreach { future => + try { + future.get() + fail("No message should be sent successfully.") + } catch { + case e: Exception => + assertEquals("java.lang.IllegalStateException: Producer is closed forcefully.", e.getMessage) + } + } + val fetchResponse = if (leader0.get == configs(0).brokerId) { + consumer1.fetch(new FetchRequestBuilder().addFetch(topic, 0, 0, Int.MaxValue).build()) + } else { + consumer2.fetch(new FetchRequestBuilder().addFetch(topic, 0, 0, Int.MaxValue).build()) + } + assertEquals("Fetch response should have no message returned.", 0, fetchResponse.messageSet(topic, 0).size) + } + } finally { + if (producer != null) + producer.close() + } + } + + /** + * Test close with zero and non-zero timeout from sender thread + */ + @Test + def testCloseWithZeroTimeoutFromSenderThread() { + var producer: KafkaProducer[Array[Byte],Array[Byte]] = null + try { + // create topic + val leaders = TestUtils.createTopic(zkClient, topic, 2, 2, servers) + val leader0 = leaders(0) + val leader1 = leaders(1) + + // create record + val record0 = new ProducerRecord[Array[Byte], Array[Byte]](topic, 0, null, "value".getBytes) + val record1 = new ProducerRecord[Array[Byte], Array[Byte]](topic, 1, null, "value".getBytes) + + // Test closing from sender thread. + class CloseCallback(producer: KafkaProducer[Array[Byte], Array[Byte]]) extends Callback { + override def onCompletion(metadata: RecordMetadata, exception: Exception) { + // Trigger another batch in accumulator before close the producer. These messages should + // not be sent. + (0 until numRecords) map (i => producer.send(record1)) + // The close call will be called by all the message callbacks. This tests idempotence of the close call. + producer.close(0, TimeUnit.MILLISECONDS) + // Test close with non zero timeout. Should not block at all. + producer.close(Long.MaxValue, TimeUnit.MICROSECONDS) + } + } + for(i <- 0 until 50) { + producer = TestUtils.createNewProducer(brokerList, lingerMs = Long.MaxValue) + // send message to partition 0 + var responses = (0 until numRecords) map (i => producer.send(record0)) + // send message to partition 1 + responses ++= ((0 until numRecords) map (i => producer.send(record1, new CloseCallback(producer)))) + assertTrue("No request is complete.", responses.forall(!_.isDone())) + // flush the messages. + producer.flush() + assertTrue("All request are complete.", responses.forall(_.isDone())) + // Check the messages received by broker. 
+ val fetchResponse0 = if (leader0.get == configs(0).brokerId) { + consumer1.fetch(new FetchRequestBuilder().addFetch(topic, 0, 0, Int.MaxValue).build()) + } else { + consumer2.fetch(new FetchRequestBuilder().addFetch(topic, 0, 0, Int.MaxValue).build()) + } + val fetchResponse1 = if (leader1.get == configs(0).brokerId) { + consumer1.fetch(new FetchRequestBuilder().addFetch(topic, 1, 0, Int.MaxValue).build()) + } else { + consumer2.fetch(new FetchRequestBuilder().addFetch(topic, 1, 0, Int.MaxValue).build()) + } + val expectedNumRecords = (i + 1) * numRecords + assertEquals("Fetch response to partition 0 should have %d messages.".format(expectedNumRecords), + expectedNumRecords, fetchResponse0.messageSet(topic, 0).size) + assertEquals("Fetch response to partition 1 should have %d messages.".format(expectedNumRecords), + expectedNumRecords, fetchResponse1.messageSet(topic, 1).size) + } + } finally { + if (producer != null) + producer.close() + } + } } diff --git a/core/src/test/scala/unit/kafka/utils/TestUtils.scala b/core/src/test/scala/unit/kafka/utils/TestUtils.scala index 5b95133..7d5cbd1 100755 --- a/core/src/test/scala/unit/kafka/utils/TestUtils.scala +++ b/core/src/test/scala/unit/kafka/utils/TestUtils.scala @@ -160,12 +160,12 @@ object TestUtils extends Logging { def createBrokerConfig(nodeId: Int, zkConnect: String, enableControlledShutdown: Boolean = true, enableDeleteTopic: Boolean = false, - port: Int = RandomPort, enableSSL: Boolean = false, trustStoreFile: Option[File] = None): Properties = { + port: Int = RandomPort, enableSSL: Boolean = false, sslPort: Int = RandomPort, trustStoreFile: Option[File] = None): Properties = { val props = new Properties var listeners: String = "PLAINTEXT://localhost:"+port.toString if (nodeId >= 0) props.put("broker.id", nodeId.toString) if (enableSSL) - listeners = listeners + "," + "SSL://localhost:"+port.toString + listeners = listeners + "," + "SSL://localhost:"+sslPort.toString props.put("listeners", listeners) props.put("log.dir", TestUtils.tempDir().getAbsolutePath) props.put("zookeeper.connect", zkConnect) -- 2.4.6 From f60c95273b3b814792d0da9264a75939049dcc5f Mon Sep 17 00:00:00 2001 From: Sriharsha Chintalapani Date: Sat, 20 Jun 2015 21:45:58 -0700 Subject: [PATCH 17/30] KAFKA-1690. Post merge fixes. 
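A note on the Selector.java hunk below, with a small sketch that is not part of the patch: the deleted debug statement still used `transmissions.id`, a reference that is no longer in scope in the merged code, which is presumably why it is dropped rather than updated. If the connection-created log is wanted back, it can be keyed on the channel id that the surrounding lines already use; a minimal sketch of that finishConnect block, assumed rather than taken from this patch:

    // finishConnect handling as shown in the hunk, with the debug line re-added
    channel.finishConnect();
    this.connected.add(channel.id());
    this.sensors.connectionCreated.record();
    // sketch only: same message as before, without the stale `transmissions` reference
    log.debug("Connection {} created", channel.id());
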
--- .../org/apache/kafka/common/network/Selector.java | 1 - .../apache/kafka/common/network/SelectorTest.java | 3 +- core/src/main/scala/kafka/server/KafkaConfig.scala | 253 +++++++++++---------- 3 files changed, 137 insertions(+), 120 deletions(-) diff --git a/clients/src/main/java/org/apache/kafka/common/network/Selector.java b/clients/src/main/java/org/apache/kafka/common/network/Selector.java index 6a04166..0bd2e1e 100644 --- a/clients/src/main/java/org/apache/kafka/common/network/Selector.java +++ b/clients/src/main/java/org/apache/kafka/common/network/Selector.java @@ -266,7 +266,6 @@ public class Selector implements Selectable { channel.finishConnect(); this.connected.add(channel.id()); this.sensors.connectionCreated.record(); - log.debug("Connection {} created", transmissions.id); } /* if channel is not ready finish prepare */ diff --git a/clients/src/test/java/org/apache/kafka/common/network/SelectorTest.java b/clients/src/test/java/org/apache/kafka/common/network/SelectorTest.java index 4664156..8ec5bed 100644 --- a/clients/src/test/java/org/apache/kafka/common/network/SelectorTest.java +++ b/clients/src/test/java/org/apache/kafka/common/network/SelectorTest.java @@ -51,9 +51,10 @@ public class SelectorTest { configs.put(SSLConfigs.PRINCIPAL_BUILDER_CLASS_CONFIG, Class.forName(SSLConfigs.DEFAULT_PRINCIPAL_BUILDER_CLASS)); this.server = new EchoServer(configs); this.server.start(); + this.time = new MockTime(); this.channelBuilder = new PlainTextChannelBuilder(); this.channelBuilder.configure(configs); - this.selector = new Selector(5000, new Metrics(), new MockTime() , "MetricGroup", new LinkedHashMap(), channelBuilder); + this.selector = new Selector(5000, new Metrics(), time, "MetricGroup", new LinkedHashMap(), channelBuilder); } @After diff --git a/core/src/main/scala/kafka/server/KafkaConfig.scala b/core/src/main/scala/kafka/server/KafkaConfig.scala index 4c03748..755fb85 100755 --- a/core/src/main/scala/kafka/server/KafkaConfig.scala +++ b/core/src/main/scala/kafka/server/KafkaConfig.scala @@ -464,136 +464,136 @@ object KafkaConfig { /** ********* Zookeeper Configuration ***********/ .define(ZkConnectProp, STRING, HIGH, ZkConnectDoc) - .define(ZkSessionTimeoutMsProp, INT, Defaults.ZkSessionTimeoutMs, HIGH, ZkSessionTimeoutMsDoc) - .define(ZkConnectionTimeoutMsProp, INT, HIGH, ZkConnectionTimeoutMsDoc, false) - .define(ZkSyncTimeMsProp, INT, Defaults.ZkSyncTimeMs, LOW, ZkSyncTimeMsDoc) + .define(ZkSessionTimeoutMsProp, INT, Defaults.ZkSessionTimeoutMs, HIGH, ZkSessionTimeoutMsDoc) + .define(ZkConnectionTimeoutMsProp, INT, HIGH, ZkConnectionTimeoutMsDoc, false) + .define(ZkSyncTimeMsProp, INT, Defaults.ZkSyncTimeMs, LOW, ZkSyncTimeMsDoc) /** ********* General Configuration ***********/ .define(MaxReservedBrokerIdProp, INT, Defaults.MaxReservedBrokerId, atLeast(0), MEDIUM, MaxReservedBrokerIdProp) - .define(BrokerIdProp, INT, Defaults.BrokerId, HIGH, BrokerIdDoc) - .define(MessageMaxBytesProp, INT, Defaults.MessageMaxBytes, atLeast(0), HIGH, MessageMaxBytesDoc) - .define(NumNetworkThreadsProp, INT, Defaults.NumNetworkThreads, atLeast(1), HIGH, NumNetworkThreadsDoc) - .define(NumIoThreadsProp, INT, Defaults.NumIoThreads, atLeast(1), HIGH, NumIoThreadsDoc) - .define(BackgroundThreadsProp, INT, Defaults.BackgroundThreads, atLeast(1), HIGH, BackgroundThreadsDoc) - .define(QueuedMaxRequestsProp, INT, Defaults.QueuedMaxRequests, atLeast(1), HIGH, QueuedMaxRequestsDoc) + .define(BrokerIdProp, INT, Defaults.BrokerId, HIGH, BrokerIdDoc) + .define(MessageMaxBytesProp, INT, 
Defaults.MessageMaxBytes, atLeast(0), HIGH, MessageMaxBytesDoc) + .define(NumNetworkThreadsProp, INT, Defaults.NumNetworkThreads, atLeast(1), HIGH, NumNetworkThreadsDoc) + .define(NumIoThreadsProp, INT, Defaults.NumIoThreads, atLeast(1), HIGH, NumIoThreadsDoc) + .define(BackgroundThreadsProp, INT, Defaults.BackgroundThreads, atLeast(1), HIGH, BackgroundThreadsDoc) + .define(QueuedMaxRequestsProp, INT, Defaults.QueuedMaxRequests, atLeast(1), HIGH, QueuedMaxRequestsDoc) /** ********* Socket Server Configuration ***********/ .define(PortProp, INT, Defaults.Port, HIGH, PortDoc) - .define(HostNameProp, STRING, Defaults.HostName, HIGH, HostNameDoc) - .define(ListenersProp, STRING, HIGH, ListenersDoc, false) - .define(AdvertisedHostNameProp, STRING, HIGH, AdvertisedHostNameDoc, false) - .define(AdvertisedPortProp, INT, HIGH, AdvertisedPortDoc, false) - .define(AdvertisedListenersProp, STRING, HIGH, AdvertisedListenersDoc, false) - .define(SocketSendBufferBytesProp, INT, Defaults.SocketSendBufferBytes, HIGH, SocketSendBufferBytesDoc) - .define(SocketReceiveBufferBytesProp, INT, Defaults.SocketReceiveBufferBytes, HIGH, SocketReceiveBufferBytesDoc) - .define(SocketRequestMaxBytesProp, INT, Defaults.SocketRequestMaxBytes, atLeast(1), HIGH, SocketRequestMaxBytesDoc) - .define(MaxConnectionsPerIpProp, INT, Defaults.MaxConnectionsPerIp, atLeast(1), MEDIUM, MaxConnectionsPerIpDoc) - .define(MaxConnectionsPerIpOverridesProp, STRING, Defaults.MaxConnectionsPerIpOverrides, MEDIUM, MaxConnectionsPerIpOverridesDoc) - .define(ConnectionsMaxIdleMsProp, LONG, Defaults.ConnectionsMaxIdleMs, MEDIUM, ConnectionsMaxIdleMsDoc) + .define(HostNameProp, STRING, Defaults.HostName, HIGH, HostNameDoc) + .define(ListenersProp, STRING, HIGH, ListenersDoc, false) + .define(AdvertisedHostNameProp, STRING, HIGH, AdvertisedHostNameDoc, false) + .define(AdvertisedPortProp, INT, HIGH, AdvertisedPortDoc, false) + .define(AdvertisedListenersProp, STRING, HIGH, AdvertisedListenersDoc, false) + .define(SocketSendBufferBytesProp, INT, Defaults.SocketSendBufferBytes, HIGH, SocketSendBufferBytesDoc) + .define(SocketReceiveBufferBytesProp, INT, Defaults.SocketReceiveBufferBytes, HIGH, SocketReceiveBufferBytesDoc) + .define(SocketRequestMaxBytesProp, INT, Defaults.SocketRequestMaxBytes, atLeast(1), HIGH, SocketRequestMaxBytesDoc) + .define(MaxConnectionsPerIpProp, INT, Defaults.MaxConnectionsPerIp, atLeast(1), MEDIUM, MaxConnectionsPerIpDoc) + .define(MaxConnectionsPerIpOverridesProp, STRING, Defaults.MaxConnectionsPerIpOverrides, MEDIUM, MaxConnectionsPerIpOverridesDoc) + .define(ConnectionsMaxIdleMsProp, LONG, Defaults.ConnectionsMaxIdleMs, MEDIUM, ConnectionsMaxIdleMsDoc) /** ********* Log Configuration ***********/ .define(NumPartitionsProp, INT, Defaults.NumPartitions, atLeast(1), MEDIUM, NumPartitionsDoc) - .define(LogDirProp, STRING, Defaults.LogDir, HIGH, LogDirDoc) - .define(LogDirsProp, STRING, HIGH, LogDirsDoc, false) - .define(LogSegmentBytesProp, INT, Defaults.LogSegmentBytes, atLeast(Message.MinHeaderSize), HIGH, LogSegmentBytesDoc) + .define(LogDirProp, STRING, Defaults.LogDir, HIGH, LogDirDoc) + .define(LogDirsProp, STRING, HIGH, LogDirsDoc, false) + .define(LogSegmentBytesProp, INT, Defaults.LogSegmentBytes, atLeast(Message.MinHeaderSize), HIGH, LogSegmentBytesDoc) .define(LogRollTimeMillisProp, LONG, HIGH, LogRollTimeMillisDoc, false) - .define(LogRollTimeHoursProp, INT, Defaults.LogRollHours, atLeast(1), HIGH, LogRollTimeHoursDoc) + .define(LogRollTimeHoursProp, INT, Defaults.LogRollHours, atLeast(1), HIGH, 
LogRollTimeHoursDoc) .define(LogRollTimeJitterMillisProp, LONG, HIGH, LogRollTimeJitterMillisDoc, false) - .define(LogRollTimeJitterHoursProp, INT, Defaults.LogRollJitterHours, atLeast(0), HIGH, LogRollTimeJitterHoursDoc) + .define(LogRollTimeJitterHoursProp, INT, Defaults.LogRollJitterHours, atLeast(0), HIGH, LogRollTimeJitterHoursDoc) .define(LogRetentionTimeMillisProp, LONG, HIGH, LogRetentionTimeMillisDoc, false) - .define(LogRetentionTimeMinutesProp, INT, HIGH, LogRetentionTimeMinsDoc, false) - .define(LogRetentionTimeHoursProp, INT, Defaults.LogRetentionHours, HIGH, LogRetentionTimeHoursDoc) + .define(LogRetentionTimeMinutesProp, INT, HIGH, LogRetentionTimeMinsDoc, false) + .define(LogRetentionTimeHoursProp, INT, Defaults.LogRetentionHours, HIGH, LogRetentionTimeHoursDoc) .define(LogRetentionBytesProp, LONG, Defaults.LogRetentionBytes, HIGH, LogRetentionBytesDoc) - .define(LogCleanupIntervalMsProp, LONG, Defaults.LogCleanupIntervalMs, atLeast(1), MEDIUM, LogCleanupIntervalMsDoc) - .define(LogCleanupPolicyProp, STRING, Defaults.LogCleanupPolicy, in(Defaults.Compact, Defaults.Delete), MEDIUM, LogCleanupPolicyDoc) - .define(LogCleanerThreadsProp, INT, Defaults.LogCleanerThreads, atLeast(0), MEDIUM, LogCleanerThreadsDoc) - .define(LogCleanerIoMaxBytesPerSecondProp, DOUBLE, Defaults.LogCleanerIoMaxBytesPerSecond, MEDIUM, LogCleanerIoMaxBytesPerSecondDoc) - .define(LogCleanerDedupeBufferSizeProp, LONG, Defaults.LogCleanerDedupeBufferSize, MEDIUM, LogCleanerDedupeBufferSizeDoc) - .define(LogCleanerIoBufferSizeProp, INT, Defaults.LogCleanerIoBufferSize, atLeast(0), MEDIUM, LogCleanerIoBufferSizeDoc) - .define(LogCleanerDedupeBufferLoadFactorProp, DOUBLE, Defaults.LogCleanerDedupeBufferLoadFactor, MEDIUM, LogCleanerDedupeBufferLoadFactorDoc) - .define(LogCleanerBackoffMsProp, LONG, Defaults.LogCleanerBackoffMs, atLeast(0), MEDIUM, LogCleanerBackoffMsDoc) - .define(LogCleanerMinCleanRatioProp, DOUBLE, Defaults.LogCleanerMinCleanRatio, MEDIUM, LogCleanerMinCleanRatioDoc) - .define(LogCleanerEnableProp, BOOLEAN, Defaults.LogCleanerEnable, MEDIUM, LogCleanerEnableDoc) - .define(LogCleanerDeleteRetentionMsProp, LONG, Defaults.LogCleanerDeleteRetentionMs, MEDIUM, LogCleanerDeleteRetentionMsDoc) - .define(LogIndexSizeMaxBytesProp, INT, Defaults.LogIndexSizeMaxBytes, atLeast(4), MEDIUM, LogIndexSizeMaxBytesDoc) - .define(LogIndexIntervalBytesProp, INT, Defaults.LogIndexIntervalBytes, atLeast(0), MEDIUM, LogIndexIntervalBytesDoc) - .define(LogFlushIntervalMessagesProp, LONG, Defaults.LogFlushIntervalMessages, atLeast(1), HIGH, LogFlushIntervalMessagesDoc) - .define(LogDeleteDelayMsProp, LONG, Defaults.LogDeleteDelayMs, atLeast(0), HIGH, LogDeleteDelayMsDoc) - .define(LogFlushSchedulerIntervalMsProp, LONG, Defaults.LogFlushSchedulerIntervalMs, HIGH, LogFlushSchedulerIntervalMsDoc) - .define(LogFlushIntervalMsProp, LONG, HIGH, LogFlushIntervalMsDoc, false) - .define(LogFlushOffsetCheckpointIntervalMsProp, INT, Defaults.LogFlushOffsetCheckpointIntervalMs, atLeast(0), HIGH, LogFlushOffsetCheckpointIntervalMsDoc) - .define(LogPreAllocateProp, BOOLEAN, Defaults.LogPreAllocateEnable, MEDIUM, LogPreAllocateEnableDoc) - .define(NumRecoveryThreadsPerDataDirProp, INT, Defaults.NumRecoveryThreadsPerDataDir, atLeast(1), HIGH, NumRecoveryThreadsPerDataDirDoc) - .define(AutoCreateTopicsEnableProp, BOOLEAN, Defaults.AutoCreateTopicsEnable, HIGH, AutoCreateTopicsEnableDoc) - .define(MinInSyncReplicasProp, INT, Defaults.MinInSyncReplicas, atLeast(1), HIGH, MinInSyncReplicasDoc) + .define(LogCleanupIntervalMsProp, LONG, 
Defaults.LogCleanupIntervalMs, atLeast(1), MEDIUM, LogCleanupIntervalMsDoc) + .define(LogCleanupPolicyProp, STRING, Defaults.LogCleanupPolicy, in(Defaults.Compact, Defaults.Delete), MEDIUM, LogCleanupPolicyDoc) + .define(LogCleanerThreadsProp, INT, Defaults.LogCleanerThreads, atLeast(0), MEDIUM, LogCleanerThreadsDoc) + .define(LogCleanerIoMaxBytesPerSecondProp, DOUBLE, Defaults.LogCleanerIoMaxBytesPerSecond, MEDIUM, LogCleanerIoMaxBytesPerSecondDoc) + .define(LogCleanerDedupeBufferSizeProp, LONG, Defaults.LogCleanerDedupeBufferSize, MEDIUM, LogCleanerDedupeBufferSizeDoc) + .define(LogCleanerIoBufferSizeProp, INT, Defaults.LogCleanerIoBufferSize, atLeast(0), MEDIUM, LogCleanerIoBufferSizeDoc) + .define(LogCleanerDedupeBufferLoadFactorProp, DOUBLE, Defaults.LogCleanerDedupeBufferLoadFactor, MEDIUM, LogCleanerDedupeBufferLoadFactorDoc) + .define(LogCleanerBackoffMsProp, LONG, Defaults.LogCleanerBackoffMs, atLeast(0), MEDIUM, LogCleanerBackoffMsDoc) + .define(LogCleanerMinCleanRatioProp, DOUBLE, Defaults.LogCleanerMinCleanRatio, MEDIUM, LogCleanerMinCleanRatioDoc) + .define(LogCleanerEnableProp, BOOLEAN, Defaults.LogCleanerEnable, MEDIUM, LogCleanerEnableDoc) + .define(LogCleanerDeleteRetentionMsProp, LONG, Defaults.LogCleanerDeleteRetentionMs, MEDIUM, LogCleanerDeleteRetentionMsDoc) + .define(LogIndexSizeMaxBytesProp, INT, Defaults.LogIndexSizeMaxBytes, atLeast(4), MEDIUM, LogIndexSizeMaxBytesDoc) + .define(LogIndexIntervalBytesProp, INT, Defaults.LogIndexIntervalBytes, atLeast(0), MEDIUM, LogIndexIntervalBytesDoc) + .define(LogFlushIntervalMessagesProp, LONG, Defaults.LogFlushIntervalMessages, atLeast(1), HIGH, LogFlushIntervalMessagesDoc) + .define(LogDeleteDelayMsProp, LONG, Defaults.LogDeleteDelayMs, atLeast(0), HIGH, LogDeleteDelayMsDoc) + .define(LogFlushSchedulerIntervalMsProp, LONG, Defaults.LogFlushSchedulerIntervalMs, HIGH, LogFlushSchedulerIntervalMsDoc) + .define(LogFlushIntervalMsProp, LONG, HIGH, LogFlushIntervalMsDoc, false) + .define(LogFlushOffsetCheckpointIntervalMsProp, INT, Defaults.LogFlushOffsetCheckpointIntervalMs, atLeast(0), HIGH, LogFlushOffsetCheckpointIntervalMsDoc) + .define(LogPreAllocateProp, BOOLEAN, Defaults.LogPreAllocateEnable, MEDIUM, LogPreAllocateEnableDoc) + .define(NumRecoveryThreadsPerDataDirProp, INT, Defaults.NumRecoveryThreadsPerDataDir, atLeast(1), HIGH, NumRecoveryThreadsPerDataDirDoc) + .define(AutoCreateTopicsEnableProp, BOOLEAN, Defaults.AutoCreateTopicsEnable, HIGH, AutoCreateTopicsEnableDoc) + .define(MinInSyncReplicasProp, INT, Defaults.MinInSyncReplicas, atLeast(1), HIGH, MinInSyncReplicasDoc) /** ********* Replication configuration ***********/ .define(ControllerSocketTimeoutMsProp, INT, Defaults.ControllerSocketTimeoutMs, MEDIUM, ControllerSocketTimeoutMsDoc) - .define(DefaultReplicationFactorProp, INT, Defaults.DefaultReplicationFactor, MEDIUM, DefaultReplicationFactorDoc) - .define(ReplicaLagTimeMaxMsProp, LONG, Defaults.ReplicaLagTimeMaxMs, HIGH, ReplicaLagTimeMaxMsDoc) - .define(ReplicaSocketTimeoutMsProp, INT, Defaults.ReplicaSocketTimeoutMs, HIGH, ReplicaSocketTimeoutMsDoc) - .define(ReplicaSocketReceiveBufferBytesProp, INT, Defaults.ReplicaSocketReceiveBufferBytes, HIGH, ReplicaSocketReceiveBufferBytesDoc) - .define(ReplicaFetchMaxBytesProp, INT, Defaults.ReplicaFetchMaxBytes, HIGH, ReplicaFetchMaxBytesDoc) - .define(ReplicaFetchWaitMaxMsProp, INT, Defaults.ReplicaFetchWaitMaxMs, HIGH, ReplicaFetchWaitMaxMsDoc) - .define(ReplicaFetchBackoffMsProp, INT, Defaults.ReplicaFetchBackoffMs, atLeast(0), MEDIUM, ReplicaFetchBackoffMsDoc) - 
.define(ReplicaFetchMinBytesProp, INT, Defaults.ReplicaFetchMinBytes, HIGH, ReplicaFetchMinBytesDoc) - .define(NumReplicaFetchersProp, INT, Defaults.NumReplicaFetchers, HIGH, NumReplicaFetchersDoc) - .define(ReplicaHighWatermarkCheckpointIntervalMsProp, LONG, Defaults.ReplicaHighWatermarkCheckpointIntervalMs, HIGH, ReplicaHighWatermarkCheckpointIntervalMsDoc) - .define(FetchPurgatoryPurgeIntervalRequestsProp, INT, Defaults.FetchPurgatoryPurgeIntervalRequests, MEDIUM, FetchPurgatoryPurgeIntervalRequestsDoc) - .define(ProducerPurgatoryPurgeIntervalRequestsProp, INT, Defaults.ProducerPurgatoryPurgeIntervalRequests, MEDIUM, ProducerPurgatoryPurgeIntervalRequestsDoc) - .define(AutoLeaderRebalanceEnableProp, BOOLEAN, Defaults.AutoLeaderRebalanceEnable, HIGH, AutoLeaderRebalanceEnableDoc) - .define(LeaderImbalancePerBrokerPercentageProp, INT, Defaults.LeaderImbalancePerBrokerPercentage, HIGH, LeaderImbalancePerBrokerPercentageDoc) - .define(LeaderImbalanceCheckIntervalSecondsProp, LONG, Defaults.LeaderImbalanceCheckIntervalSeconds, HIGH, LeaderImbalanceCheckIntervalSecondsDoc) - .define(UncleanLeaderElectionEnableProp, BOOLEAN, Defaults.UncleanLeaderElectionEnable, HIGH, UncleanLeaderElectionEnableDoc) - .define(InterBrokerSecurityProtocolProp, STRING, Defaults.InterBrokerSecurityProtocol, MEDIUM, InterBrokerSecurityProtocolDoc) - .define(InterBrokerProtocolVersionProp, STRING, Defaults.InterBrokerProtocolVersion, MEDIUM, InterBrokerProtocolVersionDoc) + .define(DefaultReplicationFactorProp, INT, Defaults.DefaultReplicationFactor, MEDIUM, DefaultReplicationFactorDoc) + .define(ReplicaLagTimeMaxMsProp, LONG, Defaults.ReplicaLagTimeMaxMs, HIGH, ReplicaLagTimeMaxMsDoc) + .define(ReplicaSocketTimeoutMsProp, INT, Defaults.ReplicaSocketTimeoutMs, HIGH, ReplicaSocketTimeoutMsDoc) + .define(ReplicaSocketReceiveBufferBytesProp, INT, Defaults.ReplicaSocketReceiveBufferBytes, HIGH, ReplicaSocketReceiveBufferBytesDoc) + .define(ReplicaFetchMaxBytesProp, INT, Defaults.ReplicaFetchMaxBytes, HIGH, ReplicaFetchMaxBytesDoc) + .define(ReplicaFetchWaitMaxMsProp, INT, Defaults.ReplicaFetchWaitMaxMs, HIGH, ReplicaFetchWaitMaxMsDoc) + .define(ReplicaFetchBackoffMsProp, INT, Defaults.ReplicaFetchBackoffMs, atLeast(0), MEDIUM, ReplicaFetchBackoffMsDoc) + .define(ReplicaFetchMinBytesProp, INT, Defaults.ReplicaFetchMinBytes, HIGH, ReplicaFetchMinBytesDoc) + .define(NumReplicaFetchersProp, INT, Defaults.NumReplicaFetchers, HIGH, NumReplicaFetchersDoc) + .define(ReplicaHighWatermarkCheckpointIntervalMsProp, LONG, Defaults.ReplicaHighWatermarkCheckpointIntervalMs, HIGH, ReplicaHighWatermarkCheckpointIntervalMsDoc) + .define(FetchPurgatoryPurgeIntervalRequestsProp, INT, Defaults.FetchPurgatoryPurgeIntervalRequests, MEDIUM, FetchPurgatoryPurgeIntervalRequestsDoc) + .define(ProducerPurgatoryPurgeIntervalRequestsProp, INT, Defaults.ProducerPurgatoryPurgeIntervalRequests, MEDIUM, ProducerPurgatoryPurgeIntervalRequestsDoc) + .define(AutoLeaderRebalanceEnableProp, BOOLEAN, Defaults.AutoLeaderRebalanceEnable, HIGH, AutoLeaderRebalanceEnableDoc) + .define(LeaderImbalancePerBrokerPercentageProp, INT, Defaults.LeaderImbalancePerBrokerPercentage, HIGH, LeaderImbalancePerBrokerPercentageDoc) + .define(LeaderImbalanceCheckIntervalSecondsProp, LONG, Defaults.LeaderImbalanceCheckIntervalSeconds, HIGH, LeaderImbalanceCheckIntervalSecondsDoc) + .define(UncleanLeaderElectionEnableProp, BOOLEAN, Defaults.UncleanLeaderElectionEnable, HIGH, UncleanLeaderElectionEnableDoc) + .define(InterBrokerSecurityProtocolProp, STRING, 
Defaults.InterBrokerSecurityProtocol, MEDIUM, InterBrokerSecurityProtocolDoc) + .define(InterBrokerProtocolVersionProp, STRING, Defaults.InterBrokerProtocolVersion, MEDIUM, InterBrokerProtocolVersionDoc) /** ********* Controlled shutdown configuration ***********/ .define(ControlledShutdownMaxRetriesProp, INT, Defaults.ControlledShutdownMaxRetries, MEDIUM, ControlledShutdownMaxRetriesDoc) - .define(ControlledShutdownRetryBackoffMsProp, LONG, Defaults.ControlledShutdownRetryBackoffMs, MEDIUM, ControlledShutdownRetryBackoffMsDoc) - .define(ControlledShutdownEnableProp, BOOLEAN, Defaults.ControlledShutdownEnable, MEDIUM, ControlledShutdownEnableDoc) + .define(ControlledShutdownRetryBackoffMsProp, LONG, Defaults.ControlledShutdownRetryBackoffMs, MEDIUM, ControlledShutdownRetryBackoffMsDoc) + .define(ControlledShutdownEnableProp, BOOLEAN, Defaults.ControlledShutdownEnable, MEDIUM, ControlledShutdownEnableDoc) /** ********* Consumer coordinator configuration ***********/ .define(ConsumerMinSessionTimeoutMsProp, INT, Defaults.ConsumerMinSessionTimeoutMs, MEDIUM, ConsumerMinSessionTimeoutMsDoc) - .define(ConsumerMaxSessionTimeoutMsProp, INT, Defaults.ConsumerMaxSessionTimeoutMs, MEDIUM, ConsumerMaxSessionTimeoutMsDoc) + .define(ConsumerMaxSessionTimeoutMsProp, INT, Defaults.ConsumerMaxSessionTimeoutMs, MEDIUM, ConsumerMaxSessionTimeoutMsDoc) /** ********* Offset management configuration ***********/ .define(OffsetMetadataMaxSizeProp, INT, Defaults.OffsetMetadataMaxSize, HIGH, OffsetMetadataMaxSizeDoc) - .define(OffsetsLoadBufferSizeProp, INT, Defaults.OffsetsLoadBufferSize, atLeast(1), HIGH, OffsetsLoadBufferSizeDoc) - .define(OffsetsTopicReplicationFactorProp, SHORT, Defaults.OffsetsTopicReplicationFactor, atLeast(1), HIGH, OffsetsTopicReplicationFactorDoc) - .define(OffsetsTopicPartitionsProp, INT, Defaults.OffsetsTopicPartitions, atLeast(1), HIGH, OffsetsTopicPartitionsDoc) - .define(OffsetsTopicSegmentBytesProp, INT, Defaults.OffsetsTopicSegmentBytes, atLeast(1), HIGH, OffsetsTopicSegmentBytesDoc) - .define(OffsetsTopicCompressionCodecProp, INT, Defaults.OffsetsTopicCompressionCodec, HIGH, OffsetsTopicCompressionCodecDoc) - .define(OffsetsRetentionMinutesProp, INT, Defaults.OffsetsRetentionMinutes, atLeast(1), HIGH, OffsetsRetentionMinutesDoc) - .define(OffsetsRetentionCheckIntervalMsProp, LONG, Defaults.OffsetsRetentionCheckIntervalMs, atLeast(1), HIGH, OffsetsRetentionCheckIntervalMsDoc) - .define(OffsetCommitTimeoutMsProp, INT, Defaults.OffsetCommitTimeoutMs, atLeast(1), HIGH, OffsetCommitTimeoutMsDoc) - .define(OffsetCommitRequiredAcksProp, SHORT, Defaults.OffsetCommitRequiredAcks, HIGH, OffsetCommitRequiredAcksDoc) - .define(DeleteTopicEnableProp, BOOLEAN, Defaults.DeleteTopicEnable, HIGH, DeleteTopicEnableDoc) - .define(CompressionTypeProp, STRING, Defaults.CompressionType, HIGH, CompressionTypeDoc) - .define(MetricNumSamplesProp, INT, Defaults.MetricNumSamples, atLeast(1), LOW, MetricNumSamplesDoc) - .define(MetricSampleWindowMsProp, LONG, Defaults.MetricSampleWindowMs, atLeast(1), LOW, MetricSampleWindowMsDoc) - .define(MetricReporterClassesProp, LIST, Defaults.MetricReporterClasses, LOW, MetricReporterClassesDoc) + .define(OffsetsLoadBufferSizeProp, INT, Defaults.OffsetsLoadBufferSize, atLeast(1), HIGH, OffsetsLoadBufferSizeDoc) + .define(OffsetsTopicReplicationFactorProp, SHORT, Defaults.OffsetsTopicReplicationFactor, atLeast(1), HIGH, OffsetsTopicReplicationFactorDoc) + .define(OffsetsTopicPartitionsProp, INT, Defaults.OffsetsTopicPartitions, atLeast(1), HIGH, 
OffsetsTopicPartitionsDoc) + .define(OffsetsTopicSegmentBytesProp, INT, Defaults.OffsetsTopicSegmentBytes, atLeast(1), HIGH, OffsetsTopicSegmentBytesDoc) + .define(OffsetsTopicCompressionCodecProp, INT, Defaults.OffsetsTopicCompressionCodec, HIGH, OffsetsTopicCompressionCodecDoc) + .define(OffsetsRetentionMinutesProp, INT, Defaults.OffsetsRetentionMinutes, atLeast(1), HIGH, OffsetsRetentionMinutesDoc) + .define(OffsetsRetentionCheckIntervalMsProp, LONG, Defaults.OffsetsRetentionCheckIntervalMs, atLeast(1), HIGH, OffsetsRetentionCheckIntervalMsDoc) + .define(OffsetCommitTimeoutMsProp, INT, Defaults.OffsetCommitTimeoutMs, atLeast(1), HIGH, OffsetCommitTimeoutMsDoc) + .define(OffsetCommitRequiredAcksProp, SHORT, Defaults.OffsetCommitRequiredAcks, HIGH, OffsetCommitRequiredAcksDoc) + .define(DeleteTopicEnableProp, BOOLEAN, Defaults.DeleteTopicEnable, HIGH, DeleteTopicEnableDoc) + .define(CompressionTypeProp, STRING, Defaults.CompressionType, HIGH, CompressionTypeDoc) + .define(MetricNumSamplesProp, INT, Defaults.MetricNumSamples, atLeast(1), LOW, MetricNumSamplesDoc) + .define(MetricSampleWindowMsProp, LONG, Defaults.MetricSampleWindowMs, atLeast(1), LOW, MetricSampleWindowMsDoc) + .define(MetricReporterClassesProp, LIST, Defaults.MetricReporterClasses, LOW, MetricReporterClassesDoc) /** ********* SSL Configuration ****************/ .define(PrincipalBuilderClassProp, STRING, Defaults.PrincipalBuilderClass, MEDIUM, PrincipalBuilderClassDoc) - .define(SSLProtocolProp, STRING, Defaults.SSLProtocol, MEDIUM, SSLProtocolDoc) - .define(SSLProviderProp, STRING, MEDIUM, SSLProviderDoc, false) - .define(SSLEnabledProtocolsProp, LIST, Defaults.SSLEnabledProtocols, MEDIUM, SSLEnabledProtocolsDoc) - .define(SSLKeystoreTypeProp, STRING, Defaults.SSLKeystoreType, MEDIUM, SSLKeystoreTypeDoc) - .define(SSLKeystoreLocationProp, STRING, Defaults.SSLKeystoreLocation, MEDIUM, SSLKeystoreLocationDoc) - .define(SSLKeystorePasswordProp, STRING, Defaults.SSLKeystorePassword, MEDIUM, SSLKeystorePasswordDoc) - .define(SSLKeyPasswordProp, STRING, Defaults.SSLKeyPassword, MEDIUM, SSLKeyPasswordDoc) - .define(SSLTruststoreTypeProp, STRING, Defaults.SSLTruststoreType, MEDIUM, SSLTruststoreTypeDoc) - .define(SSLTruststoreLocationProp, STRING, Defaults.SSLTruststoreLocation, MEDIUM, SSLTruststoreLocationDoc) - .define(SSLTruststorePasswordProp, STRING, Defaults.SSLTruststorePassword, MEDIUM, SSLTruststorePasswordDoc) - .define(SSLKeyManagerAlgorithmProp, STRING, Defaults.SSLKeyManagerAlgorithm, MEDIUM, SSLKeyManagerAlgorithmDoc) - .define(SSLTrustManagerAlgorithmProp, STRING, Defaults.SSLTrustManagerAlgorithm, MEDIUM, SSLTrustManagerAlgorithmDoc) - .define(SSLNeedClientAuthProp, BOOLEAN, Defaults.SSLNeedClientAuth, MEDIUM, SSLNeedClientAuthDoc) - .define(SSLWantClientAuthProp, BOOLEAN, Defaults.SSLWantClientAuth, MEDIUM, SSLWantClientAuthDoc) + .define(SSLProtocolProp, STRING, Defaults.SSLProtocol, MEDIUM, SSLProtocolDoc) + .define(SSLProviderProp, STRING, MEDIUM, SSLProviderDoc, false) + .define(SSLEnabledProtocolsProp, LIST, Defaults.SSLEnabledProtocols, MEDIUM, SSLEnabledProtocolsDoc) + .define(SSLKeystoreTypeProp, STRING, Defaults.SSLKeystoreType, MEDIUM, SSLKeystoreTypeDoc) + .define(SSLKeystoreLocationProp, STRING, Defaults.SSLKeystoreLocation, MEDIUM, SSLKeystoreLocationDoc) + .define(SSLKeystorePasswordProp, STRING, Defaults.SSLKeystorePassword, MEDIUM, SSLKeystorePasswordDoc) + .define(SSLKeyPasswordProp, STRING, Defaults.SSLKeyPassword, MEDIUM, SSLKeyPasswordDoc) + .define(SSLTruststoreTypeProp, STRING, 
Defaults.SSLTruststoreType, MEDIUM, SSLTruststoreTypeDoc) + .define(SSLTruststoreLocationProp, STRING, Defaults.SSLTruststoreLocation, MEDIUM, SSLTruststoreLocationDoc) + .define(SSLTruststorePasswordProp, STRING, Defaults.SSLTruststorePassword, MEDIUM, SSLTruststorePasswordDoc) + .define(SSLKeyManagerAlgorithmProp, STRING, Defaults.SSLKeyManagerAlgorithm, MEDIUM, SSLKeyManagerAlgorithmDoc) + .define(SSLTrustManagerAlgorithmProp, STRING, Defaults.SSLTrustManagerAlgorithm, MEDIUM, SSLTrustManagerAlgorithmDoc) + .define(SSLNeedClientAuthProp, BOOLEAN, Defaults.SSLNeedClientAuth, MEDIUM, SSLNeedClientAuthDoc) + .define(SSLWantClientAuthProp, BOOLEAN, Defaults.SSLWantClientAuth, MEDIUM, SSLWantClientAuthDoc) } def configNames() = { @@ -622,26 +622,6 @@ object KafkaConfig { fromProps(props) } - def channelConfigs: java.util.Map[String, Object] = { - val channelConfigs = new java.util.HashMap[String, Object]() - import kafka.server.KafkaConfig._ - channelConfigs.put(PrincipalBuilderClassProp, Class.forName(principalBuilderClass)) - channelConfigs.put(SSLProtocolProp, sslProtocol) - channelConfigs.put(SSLEnabledProtocolsProp, sslEnabledProtocols) - channelConfigs.put(SSLKeystoreTypeProp, sslKeystoreType) - channelConfigs.put(SSLKeystoreLocationProp, sslKeystoreLocation) - channelConfigs.put(SSLKeystorePasswordProp, sslKeystorePassword) - channelConfigs.put(SSLKeyPasswordProp, sslKeyPassword) - channelConfigs.put(SSLTruststoreTypeProp, sslTruststoreType) - channelConfigs.put(SSLTruststoreLocationProp, sslTruststoreLocation) - channelConfigs.put(SSLTruststorePasswordProp, sslTruststorePassword) - channelConfigs.put(SSLKeyManagerAlgorithmProp, sslKeyManagerAlgorithm) - channelConfigs.put(SSLTrustManagerAlgorithmProp, sslTrustManagerAlgorithm) - channelConfigs.put(SSLNeedClientAuthProp, sslNeedClientAuth: java.lang.Boolean) - channelConfigs.put(SSLWantClientAuthProp, sslWantClientAuth: java.lang.Boolean) - channelConfigs - } - } case class KafkaConfig (props: java.util.Map[_, _]) extends AbstractConfig(KafkaConfig.configDef, props) { @@ -753,6 +733,23 @@ case class KafkaConfig (props: java.util.Map[_, _]) extends AbstractConfig(Kafka val metricSampleWindowMs = getLong(KafkaConfig.MetricSampleWindowMsProp) val metricReporterClasses: java.util.List[MetricsReporter] = getConfiguredInstances(KafkaConfig.MetricReporterClassesProp, classOf[MetricsReporter]) + /** ********* SSL Configuration **************/ + val principalBuilderClass = getString(KafkaConfig.PrincipalBuilderClassProp) + val sslProtocol = getString(KafkaConfig.SSLProtocolProp) + val sslProvider = getString(KafkaConfig.SSLProviderProp) + val sslEnabledProtocols = getList(KafkaConfig.SSLEnabledProtocolsProp) + val sslKeystoreType = getString(KafkaConfig.SSLKeystoreTypeProp) + val sslKeystoreLocation = getString(KafkaConfig.SSLKeystoreLocationProp) + val sslKeystorePassword = getString(KafkaConfig.SSLKeystorePasswordProp) + val sslKeyPassword = getString(KafkaConfig.SSLKeyPasswordProp) + val sslTruststoreType = getString(KafkaConfig.SSLTruststoreTypeProp) + val sslTruststoreLocation = getString(KafkaConfig.SSLTruststoreLocationProp) + val sslTruststorePassword = getString(KafkaConfig.SSLTruststorePasswordProp) + val sslKeyManagerAlgorithm = getString(KafkaConfig.SSLKeyManagerAlgorithmProp) + val sslTrustManagerAlgorithm = getString(KafkaConfig.SSLTrustManagerAlgorithmProp) + val sslNeedClientAuth = getBoolean(KafkaConfig.SSLNeedClientAuthProp) + val sslWantClientAuth = getBoolean(KafkaConfig.SSLWantClientAuthProp) + val deleteTopicEnable = 
getBoolean(KafkaConfig.DeleteTopicEnableProp) val compressionType = getString(KafkaConfig.CompressionTypeProp) @@ -869,4 +866,24 @@ case class KafkaConfig (props: java.util.Map[_, _]) extends AbstractConfig(Kafka " Valid options are " + BrokerCompressionCodec.brokerCompressionOptions.mkString(",")) } + def channelConfigs: java.util.Map[String, Object] = { + val channelConfigs = new java.util.HashMap[String, Object]() + import kafka.server.KafkaConfig._ + channelConfigs.put(PrincipalBuilderClassProp, Class.forName(principalBuilderClass)) + channelConfigs.put(SSLProtocolProp, sslProtocol) + channelConfigs.put(SSLEnabledProtocolsProp, sslEnabledProtocols) + channelConfigs.put(SSLKeystoreTypeProp, sslKeystoreType) + channelConfigs.put(SSLKeystoreLocationProp, sslKeystoreLocation) + channelConfigs.put(SSLKeystorePasswordProp, sslKeystorePassword) + channelConfigs.put(SSLKeyPasswordProp, sslKeyPassword) + channelConfigs.put(SSLTruststoreTypeProp, sslTruststoreType) + channelConfigs.put(SSLTruststoreLocationProp, sslTruststoreLocation) + channelConfigs.put(SSLTruststorePasswordProp, sslTruststorePassword) + channelConfigs.put(SSLKeyManagerAlgorithmProp, sslKeyManagerAlgorithm) + channelConfigs.put(SSLTrustManagerAlgorithmProp, sslTrustManagerAlgorithm) + channelConfigs.put(SSLNeedClientAuthProp, sslNeedClientAuth: java.lang.Boolean) + channelConfigs.put(SSLWantClientAuthProp, sslWantClientAuth: java.lang.Boolean) + channelConfigs + } + } -- 2.4.6 From 8f7ba892502b09cb7cc05d75270352815fb1c42c Mon Sep 17 00:00:00 2001 From: Sriharsha Chintalapani Date: Sun, 21 Jun 2015 15:35:52 -0700 Subject: [PATCH 18/30] KAFKA-1690. Added SSLProducerSendTest. --- .../test/scala/integration/kafka/api/SSLProducerSendTest.scala | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/core/src/test/scala/integration/kafka/api/SSLProducerSendTest.scala b/core/src/test/scala/integration/kafka/api/SSLProducerSendTest.scala index 73e2d2c..3a9239c 100644 --- a/core/src/test/scala/integration/kafka/api/SSLProducerSendTest.scala +++ b/core/src/test/scala/integration/kafka/api/SSLProducerSendTest.scala @@ -120,12 +120,12 @@ class SSLProducerSendTest extends JUnit3Suite with KafkaServerTestHarness { case e: Throwable => fail("Only expecting IllegalArgumentException", e) } - // // non-blocking send a list of records - // for (i <- 1 to numRecords) - // producer.send(record0, callback) + // non-blocking send a list of records + for (i <- 1 to numRecords) + producer.send(record0, callback) - // // check that all messages have been acked via offset - // assertEquals("Should have offset " + (numRecords + 4), numRecords + 4L, producer.send(record0, callback).get.offset) + // check that all messages have been acked via offset + assertEquals("Should have offset " + (numRecords + 4), numRecords + 4L, producer.send(record0, callback).get.offset) } finally { if (producer != null) { -- 2.4.6 From 0dba29f7bd5489163949641030a98c308f25cb67 Mon Sep 17 00:00:00 2001 From: Sriharsha Chintalapani Date: Tue, 23 Jun 2015 09:16:29 -0700 Subject: [PATCH 19/30] KAFKA-1690. Minor fixes based on patch review comments. 
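The broker-side client-auth switches defined above (SSLNeedClientAuthProp and SSLWantClientAuthProp), and the SSLFactory change in the hunk below that makes the two configuration keys mutually exclusive, both map onto the standard JSSE distinction: "need" aborts the handshake when the peer presents no certificate, while "want" only requests one. A minimal stand-alone sketch of that behavior on a plain javax.net.ssl.SSLEngine (illustrative only, not code from this patch):

    import javax.net.ssl.SSLContext;
    import javax.net.ssl.SSLEngine;

    public class ClientAuthDemo {
        public static void main(String[] args) throws Exception {
            SSLEngine engine = SSLContext.getDefault().createSSLEngine();
            engine.setUseClientMode(false);      // act as the accepting (broker) side

            engine.setWantClientAuth(true);      // request a client certificate, but tolerate its absence
            engine.setNeedClientAuth(true);      // require a client certificate; overrides the "want" setting

            // prints need=true, want=false -- the two flags override one another in JSSE
            System.out.println("need=" + engine.getNeedClientAuth() + ", want=" + engine.getWantClientAuth());
        }
    }

Because SSLEngine treats the two flags as overriding one another, honoring SSL_NEED_CLIENT_AUTH_CONFIG first and only falling back to SSL_WANT_CLIENT_AUTH_CONFIG (the else-if below) avoids silently discarding one of the settings.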
--- .../apache/kafka/common/network/SSLFactory.java | 4 +--- .../kafka/common/network/SSLTransportLayer.java | 12 +++++----- .../main/scala/kafka/network/SocketServer.scala | 26 +++++++++++----------- 3 files changed, 19 insertions(+), 23 deletions(-) diff --git a/clients/src/main/java/org/apache/kafka/common/network/SSLFactory.java b/clients/src/main/java/org/apache/kafka/common/network/SSLFactory.java index ec53b69..b843ee1 100644 --- a/clients/src/main/java/org/apache/kafka/common/network/SSLFactory.java +++ b/clients/src/main/java/org/apache/kafka/common/network/SSLFactory.java @@ -74,9 +74,7 @@ public class SSLFactory implements Configurable { if (configs.containsKey(SSLConfigs.SSL_NEED_CLIENT_AUTH_CONFIG)) { this.needClientAuth = (Boolean) configs.get(SSLConfigs.SSL_NEED_CLIENT_AUTH_CONFIG); - } - - if (configs.containsKey(SSLConfigs.SSL_WANT_CLIENT_AUTH_CONFIG)) { + } else if (configs.containsKey(SSLConfigs.SSL_WANT_CLIENT_AUTH_CONFIG)) { this.wantClientAuth = (Boolean) configs.get(SSLConfigs.SSL_WANT_CLIENT_AUTH_CONFIG); } diff --git a/clients/src/main/java/org/apache/kafka/common/network/SSLTransportLayer.java b/clients/src/main/java/org/apache/kafka/common/network/SSLTransportLayer.java index 8e88b7a..f644d44 100644 --- a/clients/src/main/java/org/apache/kafka/common/network/SSLTransportLayer.java +++ b/clients/src/main/java/org/apache/kafka/common/network/SSLTransportLayer.java @@ -56,7 +56,6 @@ public class SSLTransportLayer implements TransportLayer { private ByteBuffer appReadBuffer; private ByteBuffer emptyBuf = ByteBuffer.allocate(0); private int interestOps; - private int socketSendBufferSize; public SSLTransportLayer(SelectionKey key, SSLEngine sslEngine) throws IOException { this.key = key; @@ -65,7 +64,6 @@ public class SSLTransportLayer implements TransportLayer { this.netReadBuffer = ByteBuffer.allocateDirect(packetBufferSize()); this.netWriteBuffer = ByteBuffer.allocateDirect(packetBufferSize()); this.appReadBuffer = ByteBuffer.allocateDirect(applicationBufferSize()); - this.socketSendBufferSize = this.socketChannel.socket().getSendBufferSize(); startHandshake(); } @@ -192,7 +190,7 @@ public class SSLTransportLayer implements TransportLayer { int currentPacketBufferSize = packetBufferSize(); netWriteBuffer = Utils.ensureCapacity(netWriteBuffer, currentPacketBufferSize); if (netWriteBuffer.position() >= currentPacketBufferSize) { - throw new IllegalStateException("Buffer overflow when available data (" + netWriteBuffer.position() + + throw new IllegalStateException("Buffer overflow when available data size (" + netWriteBuffer.position() + ") >= network buffer size (" + currentPacketBufferSize + ")"); } } else if (handshakeResult.getStatus() == Status.BUFFER_UNDERFLOW) { @@ -218,7 +216,7 @@ public class SSLTransportLayer implements TransportLayer { int currentAppBufferSize = applicationBufferSize(); netReadBuffer = Utils.ensureCapacity(netReadBuffer, currentAppBufferSize); if (netReadBuffer.position() > currentAppBufferSize) { - throw new IllegalStateException("Buffer underflow when available data (" + netReadBuffer.position() + + throw new IllegalStateException("Buffer underflow when available data size (" + netReadBuffer.position() + ") > packet buffer size (" + currentAppBufferSize + ")"); } } else if (handshakeResult.getStatus() == Status.CLOSED) { @@ -388,7 +386,7 @@ public class SSLTransportLayer implements TransportLayer { int currentApplicationBufferSize = applicationBufferSize(); appReadBuffer = Utils.ensureCapacity(appReadBuffer, 
currentApplicationBufferSize); if (appReadBuffer.position() >= currentApplicationBufferSize) { - throw new IllegalStateException("Buffer overflow when available data (" + appReadBuffer.position() + + throw new IllegalStateException("Buffer overflow when available data size (" + appReadBuffer.position() + ") >= application buffer size (" + currentApplicationBufferSize + ")"); } if (dst.hasRemaining()) @@ -399,7 +397,7 @@ public class SSLTransportLayer implements TransportLayer { int currentPacketBufferSize = packetBufferSize(); netReadBuffer = Utils.ensureCapacity(netReadBuffer, currentPacketBufferSize); if (netReadBuffer.position() >= currentPacketBufferSize) { - throw new IllegalStateException("Buffer underflow when available data (" + netReadBuffer.position() + + throw new IllegalStateException("Buffer underflow when available data size (" + netReadBuffer.position() + ") > packet buffer size (" + currentPacketBufferSize + ")"); } break; @@ -554,7 +552,7 @@ public class SSLTransportLayer implements TransportLayer { /** * returns a SSL Session after the handshake is established - * throws IlleagalStateException if the handshake is not established + * throws IllegalStateException if the handshake is not established */ public SSLSession sslSession() throws IllegalStateException { return sslEngine.getSession(); diff --git a/core/src/main/scala/kafka/network/SocketServer.scala b/core/src/main/scala/kafka/network/SocketServer.scala index cb125c2..fd4b667 100644 --- a/core/src/main/scala/kafka/network/SocketServer.scala +++ b/core/src/main/scala/kafka/network/SocketServer.scala @@ -102,15 +102,14 @@ class SocketServer(val config: KafkaConfig) extends Logging with KafkaMetricsGro val quotas = new ConnectionQuotas(maxConnectionsPerIp, maxConnectionsPerIpOverrides) this.synchronized { - var processorIndex = 0 + var processorBeginIndex = 0 endpoints.values.foreach(endpoint => { - val acceptor = new Acceptor(endpoint.host, endpoint.port, sendBufferSize, recvBufferSize, requestChannel, processors, quotas, endpoint.protocolType, - portToProtocol, channelConfigs, numProcessorThreads + processorIndex, maxQueuedRequests, maxRequestSize, connectionsMaxIdleMs, new Metrics(metricConfig, reporters, time), - allMetricNames, time, config.brokerId, processorIndex) + val acceptor = new Acceptor(endpoint.host, endpoint.port, sendBufferSize, recvBufferSize, config.brokerId, requestChannel, processors, processorBeginIndex, numProcessorThreads, quotas, + endpoint.protocolType, portToProtocol, channelConfigs, maxQueuedRequests, maxRequestSize, connectionsMaxIdleMs, new Metrics(metricConfig, reporters, time), allMetricNames, time) acceptors.put(endpoint, acceptor) Utils.newThread("kafka-socket-acceptor-%s-%d".format(endpoint.protocolType.toString, endpoint.port), acceptor, false).start() acceptor.awaitStartup - processorIndex += numProcessorThreads + processorBeginIndex += numProcessorThreads }) } @@ -211,23 +210,24 @@ private[kafka] class Acceptor(val host: String, private val port: Int, val sendBufferSize: Int, val recvBufferSize: Int, + brokerId: Int, requestChannel: RequestChannel, processors: Array[Processor], + processorBeginIndex: Int, + numProcessorThreads: Int, connectionQuotas: ConnectionQuotas, protocol: SecurityProtocol, portToProtocol: ConcurrentHashMap[Int, SecurityProtocol], channelConfigs: java.util.Map[String, Object], - numProcessorThreads: Int, maxQueuedRequests: Int, maxRequestSize: Int, connectionsMaxIdleMs: Long, metrics: Metrics, allMetricNames: Seq[MetricName], - time: Time, - brokerId: Int, - 
processorIndex: Int) extends AbstractServerThread(connectionQuotas) with KafkaMetricsGroup { + time: Time) extends AbstractServerThread(connectionQuotas) with KafkaMetricsGroup { val nioSelector = java.nio.channels.Selector.open() val serverChannel = openServerSocket(host, port) + val processorEndIndex = processorBeginIndex + numProcessorThreads portToProtocol.put(serverChannel.socket().getLocalPort, protocol) @@ -239,7 +239,7 @@ private[kafka] class Acceptor(val host: String, ) this.synchronized { - for (i <- processorIndex until numProcessorThreads) { + for (i <- processorBeginIndex until processorEndIndex) { processors(i) = new Processor(i, time, maxRequestSize, @@ -261,7 +261,7 @@ private[kafka] class Acceptor(val host: String, def run() { serverChannel.register(nioSelector, SelectionKey.OP_ACCEPT); startupComplete() - var currentProcessor = processorIndex + var currentProcessor = processorBeginIndex while(isRunning) { val ready = nioSelector.select(500) if(ready > 0) { @@ -278,8 +278,8 @@ private[kafka] class Acceptor(val host: String, throw new IllegalStateException("Unrecognized key state for acceptor thread.") // round robin to the next processor thread - currentProcessor = (currentProcessor + 1) % numProcessorThreads - if (currentProcessor < processorIndex) currentProcessor = processorIndex + currentProcessor = (currentProcessor + 1) % processorEndIndex + if (currentProcessor < processorBeginIndex) currentProcessor = processorEndIndex } catch { case e: Throwable => error("Error while accepting connection", e) } -- 2.4.6 From e44c90e4ce4c36243b1211e25e89d41e82acaf4e Mon Sep 17 00:00:00 2001 From: Sriharsha Chintalapani Date: Fri, 10 Jul 2015 20:05:44 -0700 Subject: [PATCH 20/30] Merge commit --- .rat-excludes | 26 - Vagrantfile | 7 +- build.gradle | 81 ++- checkstyle/import-control.xml | 7 + .../apache/kafka/clients/consumer/Consumer.java | 5 + .../kafka/clients/consumer/ConsumerRecords.java | 7 + .../clients/consumer/ConsumerWakeupException.java | 20 + .../kafka/clients/consumer/KafkaConsumer.java | 729 ++++++++++++++++----- .../kafka/clients/consumer/MockConsumer.java | 9 +- .../clients/consumer/OffsetResetStrategy.java | 17 + .../clients/consumer/internals/Coordinator.java | 474 +++++++------- .../kafka/clients/consumer/internals/Fetcher.java | 159 ++--- .../clients/consumer/internals/Heartbeat.java | 10 + .../clients/consumer/internals/RequestFuture.java | 209 ++++++ .../consumer/internals/SubscriptionState.java | 41 +- .../kafka/clients/producer/KafkaProducer.java | 7 +- .../producer/internals/ErrorLoggingCallback.java | 14 +- .../producer/internals/RecordAccumulator.java | 10 +- .../org/apache/kafka/common/network/Selector.java | 1 + .../org/apache/kafka/common/protocol/Errors.java | 6 +- .../apache/kafka/common/record/MemoryRecords.java | 8 +- .../common/requests/OffsetCommitResponse.java | 8 +- .../kafka/common/requests/OffsetFetchRequest.java | 3 - .../kafka/common/requests/OffsetFetchResponse.java | 5 +- .../java/org/apache/kafka/common/utils/Utils.java | 15 + .../kafka/clients/consumer/MockConsumerTest.java | 2 +- .../consumer/internals/CoordinatorTest.java | 147 ++++- .../clients/consumer/internals/FetcherTest.java | 32 +- .../clients/consumer/internals/HeartbeatTest.java | 9 + .../consumer/internals/SubscriptionStateTest.java | 19 +- .../org/apache/kafka/common/utils/UtilsTest.java | 12 +- core/src/main/scala/kafka/admin/TopicCommand.scala | 4 +- core/src/main/scala/kafka/cluster/Partition.scala | 23 +- .../kafka/common/OffsetMetadataAndError.scala | 14 +- 
core/src/main/scala/kafka/common/Topic.scala | 4 +- .../scala/kafka/common/TopicAndPartition.scala | 6 +- .../scala/kafka/controller/KafkaController.scala | 75 ++- .../kafka/coordinator/ConsumerCoordinator.scala | 176 ++++- .../kafka/coordinator/CoordinatorMetadata.scala | 8 +- core/src/main/scala/kafka/log/LogCleaner.scala | 12 +- core/src/main/scala/kafka/log/OffsetIndex.scala | 2 +- .../scala/kafka/producer/KafkaLog4jAppender.scala | 97 --- core/src/main/scala/kafka/server/KafkaApis.scala | 69 +- core/src/main/scala/kafka/server/KafkaConfig.scala | 3 +- core/src/main/scala/kafka/server/KafkaServer.scala | 26 +- .../main/scala/kafka/server/OffsetManager.scala | 52 +- .../main/scala/kafka/server/ReplicaManager.scala | 95 ++- core/src/main/scala/kafka/tools/MirrorMaker.scala | 4 +- .../main/scala/kafka/utils/ReplicationUtils.scala | 16 +- core/src/main/scala/kafka/utils/ZkUtils.scala | 1 + .../integration/kafka/api/ConsumerBounceTest.scala | 14 +- .../scala/integration/kafka/api/ConsumerTest.scala | 7 +- .../kafka/api/IntegrationTestHarness.scala | 9 +- .../test/scala/other/kafka/TestOffsetManager.scala | 17 + .../scala/unit/kafka/admin/TopicCommandTest.scala | 8 +- .../unit/kafka/consumer/TopicFilterTest.scala | 9 +- .../ConsumerCoordinatorResponseTest.scala | 327 +++++++++ .../coordinator/CoordinatorMetadataTest.scala | 2 +- .../unit/kafka/integration/TopicMetadataTest.scala | 84 ++- .../unit/kafka/log4j/KafkaLog4jAppenderTest.scala | 143 ---- .../scala/unit/kafka/server/KafkaConfigTest.scala | 36 +- .../scala/unit/kafka/server/OffsetCommitTest.scala | 10 +- .../unit/kafka/utils/ReplicationUtilsTest.scala | 2 + gradle/buildscript.gradle | 19 +- gradle/license.gradle | 9 - gradle/rat.gradle | 115 ++++ gradle/resources/rat-output-to-html.xsl | 206 ++++++ kafka-merge-pr.py | 442 +++++++++++++ kafka-patch-review.py | 17 + .../kafka/log4jappender/KafkaLog4jAppender.java | 167 +++++ .../log4jappender/KafkaLog4jAppenderTest.java | 98 +++ .../log4jappender/MockKafkaLog4jAppender.java | 47 ++ scala.gradle | 15 + settings.gradle | 2 +- topics.json | 4 - vagrant/broker.sh | 5 + vagrant/zk.sh | 7 +- 77 files changed, 3506 insertions(+), 1101 deletions(-) delete mode 100644 .rat-excludes create mode 100644 clients/src/main/java/org/apache/kafka/clients/consumer/ConsumerWakeupException.java create mode 100644 clients/src/main/java/org/apache/kafka/clients/consumer/OffsetResetStrategy.java create mode 100644 clients/src/main/java/org/apache/kafka/clients/consumer/internals/RequestFuture.java delete mode 100644 core/src/main/scala/kafka/producer/KafkaLog4jAppender.scala create mode 100644 core/src/test/scala/unit/kafka/coordinator/ConsumerCoordinatorResponseTest.scala delete mode 100755 core/src/test/scala/unit/kafka/log4j/KafkaLog4jAppenderTest.scala delete mode 100644 gradle/license.gradle create mode 100644 gradle/rat.gradle create mode 100644 gradle/resources/rat-output-to-html.xsl create mode 100644 kafka-merge-pr.py create mode 100644 log4j-appender/src/main/java/org/apache/kafka/log4jappender/KafkaLog4jAppender.java create mode 100644 log4j-appender/src/test/java/org/apache/kafka/log4jappender/KafkaLog4jAppenderTest.java create mode 100644 log4j-appender/src/test/java/org/apache/kafka/log4jappender/MockKafkaLog4jAppender.java delete mode 100644 topics.json diff --git a/.rat-excludes b/.rat-excludes deleted file mode 100644 index 01d6298..0000000 --- a/.rat-excludes +++ /dev/null @@ -1,26 +0,0 @@ -.rat-excludes -rat.out -sbt -sbt.boot.lock -README* -.gitignore -.git -.svn -build.properties -target 
-src_managed -update.log -clients/target -core/target -contrib/target -project/plugins/target -project/build/target -*.iml -*.csproj -TODO -Makefile* -*.html -*.xml -*expected.out -*.kafka - diff --git a/Vagrantfile b/Vagrantfile index 55c67dd..1d7cc01 100644 --- a/Vagrantfile +++ b/Vagrantfile @@ -22,6 +22,7 @@ VAGRANTFILE_API_VERSION = "2" # General config enable_dns = false +enable_jmx = false num_zookeepers = 1 num_brokers = 3 num_workers = 0 # Generic workers that get the code, but don't start any services @@ -135,7 +136,8 @@ Vagrant.configure(VAGRANTFILE_API_VERSION) do |config| ip_address = "192.168.50." + (10 + i).to_s assign_local_ip(zookeeper, ip_address) zookeeper.vm.provision "shell", path: "vagrant/base.sh" - zookeeper.vm.provision "shell", path: "vagrant/zk.sh", :args => [i.to_s, num_zookeepers] + zk_jmx_port = enable_jmx ? (8000 + i).to_s : "" + zookeeper.vm.provision "shell", path: "vagrant/zk.sh", :args => [i.to_s, num_zookeepers, zk_jmx_port] end } @@ -151,7 +153,8 @@ Vagrant.configure(VAGRANTFILE_API_VERSION) do |config| # used to support clients running on the host. zookeeper_connect = zookeepers.map{ |zk_addr| zk_addr + ":2181"}.join(",") broker.vm.provision "shell", path: "vagrant/base.sh" - broker.vm.provision "shell", path: "vagrant/broker.sh", :args => [i.to_s, enable_dns ? name : ip_address, zookeeper_connect] + kafka_jmx_port = enable_jmx ? (9000 + i).to_s : "" + broker.vm.provision "shell", path: "vagrant/broker.sh", :args => [i.to_s, enable_dns ? name : ip_address, zookeeper_connect, kafka_jmx_port] end } diff --git a/build.gradle b/build.gradle index 6355c56..4c6787e 100644 --- a/build.gradle +++ b/build.gradle @@ -13,11 +13,18 @@ // See the License for the specific language governing permissions and // limitations under the License. +import org.ajoberstar.grgit.Grgit + buildscript { repositories { mavenCentral() } apply from: file('gradle/buildscript.gradle'), to: buildscript + + dependencies { + // For Apache Rat plugin to ignore non-Git files, need ancient version for Java 6 compatibility + classpath group: 'org.ajoberstar', name: 'grgit', version: '0.2.3' + } } def slf4jlog4j='org.slf4j:slf4j-log4j12:1.7.6' @@ -41,8 +48,25 @@ ext { } apply from: file('wrapper.gradle') -apply from: file('gradle/license.gradle') apply from: file('scala.gradle') +apply from: file('gradle/rat.gradle') + +rat { + // Exclude everything under the directory that git should be ignoring via .gitignore or that isn't checked in. These + // restrict us only to files that are checked in or are staged. 
+ def repo = Grgit.open(project.file('.')) + excludes = new ArrayList(repo.clean(ignore: false, directories: true, dryRun: true)) + // And some of the files that we have checked in should also be excluded from this check + excludes.addAll([ + '**/.git/**', + 'build/**', + 'gradlew', + 'gradlew.bat', + '**/README.md', + '.reviewboardrc', + 'system_test/**', + ]) +} subprojects { apply plugin: 'java' @@ -50,9 +74,7 @@ subprojects { apply plugin: 'maven' apply plugin: 'signing' - sourceCompatibility = 1.6 - - licenseTest.onlyIf { isVerificationRequired(project) } + sourceCompatibility = 1.7 uploadArchives { repositories { @@ -181,20 +203,20 @@ for ( sv in ['2_9_1', '2_9_2', '2_10_5', '2_11_6'] ) { } } -tasks.create(name: "jarAll", dependsOn: ['jar_core_2_9_1', 'jar_core_2_9_2', 'jar_core_2_10_5', 'jar_core_2_11_6', 'clients:jar', 'examples:jar', 'contrib:hadoop-consumer:jar', 'contrib:hadoop-producer:jar']) { +tasks.create(name: "jarAll", dependsOn: ['jar_core_2_9_1', 'jar_core_2_9_2', 'jar_core_2_10_5', 'jar_core_2_11_6', 'clients:jar', 'examples:jar', 'contrib:hadoop-consumer:jar', 'contrib:hadoop-producer:jar', 'log4j-appender:jar']) { } -tasks.create(name: "srcJarAll", dependsOn: ['srcJar_2_9_1', 'srcJar_2_9_2', 'srcJar_2_10_5', 'srcJar_2_11_6', 'clients:srcJar', 'examples:srcJar', 'contrib:hadoop-consumer:srcJar', 'contrib:hadoop-producer:srcJar']) { } +tasks.create(name: "srcJarAll", dependsOn: ['srcJar_2_9_1', 'srcJar_2_9_2', 'srcJar_2_10_5', 'srcJar_2_11_6', 'clients:srcJar', 'examples:srcJar', 'contrib:hadoop-consumer:srcJar', 'contrib:hadoop-producer:srcJar', 'log4j-appender:srcJar']) { } -tasks.create(name: "docsJarAll", dependsOn: ['docsJar_2_9_1', 'docsJar_2_9_2', 'docsJar_2_10_5', 'docsJar_2_11_6', 'clients:docsJar', 'examples:docsJar', 'contrib:hadoop-consumer:docsJar', 'contrib:hadoop-producer:docsJar']) { } +tasks.create(name: "docsJarAll", dependsOn: ['docsJar_2_9_1', 'docsJar_2_9_2', 'docsJar_2_10_5', 'docsJar_2_11_6', 'clients:docsJar', 'examples:docsJar', 'contrib:hadoop-consumer:docsJar', 'contrib:hadoop-producer:docsJar', 'log4j-appender:docsJar']) { } -tasks.create(name: "testAll", dependsOn: ['test_core_2_9_1', 'test_core_2_9_2', 'test_core_2_10_5', 'test_core_2_11_6', 'clients:test']) { +tasks.create(name: "testAll", dependsOn: ['test_core_2_9_1', 'test_core_2_9_2', 'test_core_2_10_5', 'test_core_2_11_6', 'clients:test', 'log4j-appender:test']) { } tasks.create(name: "releaseTarGzAll", dependsOn: ['releaseTarGz_2_9_1', 'releaseTarGz_2_9_2', 'releaseTarGz_2_10_5', 'releaseTarGz_2_11_6']) { } -tasks.create(name: "uploadArchivesAll", dependsOn: ['uploadCoreArchives_2_9_1', 'uploadCoreArchives_2_9_2', 'uploadCoreArchives_2_10_5', 'uploadCoreArchives_2_11_6', 'clients:uploadArchives', 'examples:uploadArchives', 'contrib:hadoop-consumer:uploadArchives', 'contrib:hadoop-producer:uploadArchives']) { +tasks.create(name: "uploadArchivesAll", dependsOn: ['uploadCoreArchives_2_9_1', 'uploadCoreArchives_2_9_2', 'uploadCoreArchives_2_10_5', 'uploadCoreArchives_2_11_6', 'clients:uploadArchives', 'examples:uploadArchives', 'contrib:hadoop-consumer:uploadArchives', 'contrib:hadoop-producer:uploadArchives', 'log4j-appender:uploadArchives']) { } project(':core') { @@ -207,6 +229,7 @@ project(':core') { dependencies { compile project(':clients') + compile project(':log4j-appender') compile "org.scala-lang:scala-library:$scalaVersion" compile 'org.apache.zookeeper:zookeeper:3.4.6' compile 'com.101tec:zkclient:0.5' @@ -360,7 +383,7 @@ project(':clients') { compile 
'net.jpountz.lz4:lz4:1.2.0' testCompile 'org.bouncycastle:bcpkix-jdk15on:1.52' - testCompile 'com.novocode:junit-interface:0.9' + testCompile 'junit:junit:4.6' testRuntime "$slf4jlog4j" } @@ -388,6 +411,44 @@ project(':clients') { archives testJar } + configurations { + archives.extendsFrom (testCompile) + } + + checkstyle { + configFile = new File(rootDir, "checkstyle/checkstyle.xml") + } + test.dependsOn('checkstyleMain', 'checkstyleTest') +} + +project(':log4j-appender') { + apply plugin: 'checkstyle' + archivesBaseName = "kafka-log4j-appender" + + dependencies { + compile project(':clients') + compile "$slf4jlog4j" + + testCompile 'junit:junit:4.6' + testCompile project(path: ':clients', configuration: 'archives') + } + + task testJar(type: Jar) { + classifier = 'test' + from sourceSets.test.output + } + + test { + testLogging { + events "passed", "skipped", "failed" + exceptionFormat = 'full' + } + } + + javadoc { + include "**/org/apache/kafka/log4jappender/*" + } + checkstyle { configFile = new File(rootDir, "checkstyle/checkstyle.xml") } diff --git a/checkstyle/import-control.xml b/checkstyle/import-control.xml index 3f7c71d..0a70de2 100644 --- a/checkstyle/import-control.xml +++ b/checkstyle/import-control.xml @@ -104,6 +104,13 @@
    + + + + + + + diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/Consumer.java b/clients/src/main/java/org/apache/kafka/clients/consumer/Consumer.java index 8f587bc..fd98740 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/Consumer.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/Consumer.java @@ -108,4 +108,9 @@ public interface Consumer extends Closeable { */ public void close(); + /** + * @see KafkaConsumer#wakeup() + */ + public void wakeup(); + } diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/ConsumerRecords.java b/clients/src/main/java/org/apache/kafka/clients/consumer/ConsumerRecords.java index 1ca75f8..eb75d2e 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/ConsumerRecords.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/ConsumerRecords.java @@ -27,6 +27,8 @@ import java.util.Map; * {@link Consumer#poll(long)} operation. */ public class ConsumerRecords implements Iterable> { + public static final ConsumerRecords EMPTY = + new ConsumerRecords(Collections.EMPTY_MAP); private final Map>> records; @@ -103,4 +105,9 @@ public class ConsumerRecords implements Iterable> { } } + @SuppressWarnings("unchecked") + public static ConsumerRecords empty() { + return (ConsumerRecords) EMPTY; + } + } diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/ConsumerWakeupException.java b/clients/src/main/java/org/apache/kafka/clients/consumer/ConsumerWakeupException.java new file mode 100644 index 0000000..35f1ec9 --- /dev/null +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/ConsumerWakeupException.java @@ -0,0 +1,20 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE + * file distributed with this work for additional information regarding copyright ownership. The ASF licenses this file + * to You under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the + * License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on + * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. 
+ */ +package org.apache.kafka.clients.consumer; + +import org.apache.kafka.common.KafkaException; + +public class ConsumerWakeupException extends KafkaException { + private static final long serialVersionUID = 1L; + +} diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/KafkaConsumer.java b/clients/src/main/java/org/apache/kafka/clients/consumer/KafkaConsumer.java index b3cec41..08cb02a 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/KafkaConsumer.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/KafkaConsumer.java @@ -12,34 +12,21 @@ */ package org.apache.kafka.clients.consumer; -import java.net.InetSocketAddress; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collection; -import java.util.Collections; -import java.util.LinkedHashMap; -import java.util.List; -import java.util.Map; -import java.util.Properties; -import java.util.Set; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.atomic.AtomicInteger; -import java.util.concurrent.atomic.AtomicReference; - import org.apache.kafka.clients.ClientUtils; import org.apache.kafka.clients.Metadata; import org.apache.kafka.clients.NetworkClient; import org.apache.kafka.clients.consumer.internals.Coordinator; import org.apache.kafka.clients.consumer.internals.Fetcher; +import org.apache.kafka.clients.consumer.internals.RequestFuture; import org.apache.kafka.clients.consumer.internals.SubscriptionState; import org.apache.kafka.common.Cluster; import org.apache.kafka.common.KafkaException; import org.apache.kafka.common.Metric; +import org.apache.kafka.common.MetricName; import org.apache.kafka.common.PartitionInfo; import org.apache.kafka.common.TopicPartition; import org.apache.kafka.common.metrics.JmxReporter; import org.apache.kafka.common.metrics.MetricConfig; -import org.apache.kafka.common.MetricName; import org.apache.kafka.common.metrics.Metrics; import org.apache.kafka.common.metrics.MetricsReporter; import org.apache.kafka.common.serialization.Deserializer; @@ -51,6 +38,24 @@ import org.apache.kafka.common.utils.Utils; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import java.net.InetSocketAddress; +import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; +import java.util.ConcurrentModificationException; +import java.util.HashMap; +import java.util.LinkedHashMap; +import java.util.List; +import java.util.Map; +import java.util.Properties; +import java.util.Set; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.atomic.AtomicReference; + +import static org.apache.kafka.common.utils.Utils.min; + /** * A Kafka client that consumes records from a Kafka cluster. *

    @@ -127,8 +132,8 @@ import org.slf4j.LoggerFactory; * props.put("enable.auto.commit", "true"); * props.put("auto.commit.interval.ms", "1000"); * props.put("session.timeout.ms", "30000"); - * props.put("key.serializer", "org.apache.kafka.common.serializers.StringSerializer"); - * props.put("value.serializer", "org.apache.kafka.common.serializers.StringSerializer"); + * props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer"); + * props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer"); * KafkaConsumer<String, String> consumer = new KafkaConsumer<String, String>(props); * consumer.subscribe("foo", "bar"); * while (true) { @@ -155,8 +160,8 @@ import org.slf4j.LoggerFactory; * to it. If it stops heartbeating for a period of time longer than session.timeout.ms then it will be * considered dead and it's partitions will be assigned to another process. *

    - * The serializers settings specify how to turn the objects the user provides into bytes. By specifying the string - * serializers we are saying that our record's key and value will just be simple strings. + * The deserializer settings specify how to turn bytes into objects. For example, by specifying string deserializers, we + * are saying that our record's key and value will just be simple strings. * *

    Controlling When Messages Are Considered Consumed

    * @@ -179,8 +184,8 @@ import org.slf4j.LoggerFactory; * props.put("enable.auto.commit", "false"); * props.put("auto.commit.interval.ms", "1000"); * props.put("session.timeout.ms", "30000"); - * props.put("key.serializer", "org.apache.kafka.common.serializers.StringSerializer"); - * props.put("value.serializer", "org.apache.kafka.common.serializers.StringSerializer"); + * props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer"); + * props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer"); * KafkaConsumer<String, String> consumer = new KafkaConsumer<String, String>(props); * consumer.subscribe("foo", "bar"); * int commitInterval = 200; @@ -299,10 +304,60 @@ import org.slf4j.LoggerFactory; * *

    Multithreaded Processing

    * + * The Kafka consumer is NOT thread-safe. All network I/O happens in the thread of the application + * making the call. It is the responsibility of the user to ensure that multi-threaded access + * is properly synchronized. Un-synchronized access will result in {@link ConcurrentModificationException}. + * + *

    + * The only exception to this rule is {@link #wakeup()}, which can safely be used from an external thread to + * interrupt an active operation. In this case, a {@link ConsumerWakeupException} will be thrown from the thread + * blocking on the operation. This can be used to shutdown the consumer from another thread. The following + * snippet shows the typical pattern: + * + *

    + * public class KafkaConsumerRunner implements Runnable {
    + *     private final AtomicBoolean closed = new AtomicBoolean(false);
    + *     private final KafkaConsumer consumer;
    + *
    + *     public void run() {
    + *         try {
    + *             consumer.subscribe("topic");
    + *             while (!closed.get()) {
    + *                 ConsumerRecords records = consumer.poll(10000);
    + *                 // Handle new records
    + *             }
    + *         } catch (ConsumerWakeupException e) {
    + *             // Ignore exception if closing
    + *             if (!closed.get()) throw e;
    + *         } finally {
    + *             consumer.close();
    + *         }
    + *     }
    + *
    + *     // Shutdown hook which can be called from a separate thread
    + *     public void shutdown() {
    + *         closed.set(true);
    + *         consumer.wakeup();
    + *     }
    + * }
    + * 
    + * + * Then in a separate thread, the consumer can be shutdown by setting the closed flag and waking up the consumer. + * + *
    + *     closed.set(true);
    + *     consumer.wakeup();
    + * 
    + * +####### Ancestor + * * The Kafka consumer is threadsafe but coarsely synchronized. All network I/O happens in the thread of the application * making the call. We have intentionally avoided implementing a particular threading model for processing. +======= end *

    - * This leaves several options for implementing multi-threaded processing of records. + * We have intentionally avoided implementing a particular threading model for processing. This leaves several + * options for implementing multi-threaded processing of records. + * * *

    1. One Consumer Per Thread

    * @@ -364,6 +419,17 @@ public class KafkaConsumer implements Consumer { private final ConsumerRebalanceCallback rebalanceCallback; private long lastCommitAttemptMs; private boolean closed = false; + private final AtomicBoolean wakeup = new AtomicBoolean(false); + + // currentThread holds the threadId of the current thread accessing KafkaConsumer + // and is used to prevent multi-threaded access + private final AtomicReference currentThread = new AtomicReference(); + // refcount is used to allow reentrant access by the thread who has acquired currentThread + private final AtomicInteger refcount = new AtomicInteger(0); + + // TODO: This timeout controls how long we should wait before retrying a request. We should be able + // to leverage the work of KAFKA-2120 to get this value from configuration. + private long requestTimeoutMs = 5000L; /** * A consumer is instantiated by providing a set of key-value pairs as configuration. Valid configuration strings @@ -481,13 +547,12 @@ public class KafkaConsumer implements Consumer { config.getLong(ConsumerConfig.RECONNECT_BACKOFF_MS_CONFIG), config.getInt(ConsumerConfig.SEND_BUFFER_CONFIG), config.getInt(ConsumerConfig.RECEIVE_BUFFER_CONFIG)); - this.subscriptions = new SubscriptionState(); + OffsetResetStrategy offsetResetStrategy = OffsetResetStrategy.valueOf(config.getString(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG).toUpperCase()); + this.subscriptions = new SubscriptionState(offsetResetStrategy); this.coordinator = new Coordinator(this.client, config.getString(ConsumerConfig.GROUP_ID_CONFIG), - this.retryBackoffMs, config.getInt(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG), config.getString(ConsumerConfig.PARTITION_ASSIGNMENT_STRATEGY_CONFIG), - this.metadata, this.subscriptions, metrics, metricGrpPrefix, @@ -509,12 +574,10 @@ public class KafkaConsumer implements Consumer { this.valueDeserializer = valueDeserializer; } this.fetcher = new Fetcher(this.client, - this.retryBackoffMs, config.getInt(ConsumerConfig.FETCH_MIN_BYTES_CONFIG), config.getInt(ConsumerConfig.FETCH_MAX_WAIT_MS_CONFIG), config.getInt(ConsumerConfig.MAX_PARTITION_FETCH_BYTES_CONFIG), config.getBoolean(ConsumerConfig.CHECK_CRCS_CONFIG), - config.getString(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG).toUpperCase(), this.keyDeserializer, this.valueDeserializer, this.metadata, @@ -543,8 +606,13 @@ public class KafkaConsumer implements Consumer { * then this will give the set of topics currently assigned to the consumer (which may be none if the assignment * hasn't happened yet, or the partitions are in the process of getting reassigned). */ - public synchronized Set subscriptions() { - return Collections.unmodifiableSet(this.subscriptions.assignedPartitions()); + public Set subscriptions() { + acquire(); + try { + return Collections.unmodifiableSet(this.subscriptions.assignedPartitions()); + } finally { + release(); + } } /** @@ -562,12 +630,16 @@ public class KafkaConsumer implements Consumer { * @param topics A variable list of topics that the consumer wants to subscribe to */ @Override - public synchronized void subscribe(String... topics) { - ensureNotClosed(); - log.debug("Subscribed to topic(s): {}", Utils.join(topics, ", ")); - for (String topic : topics) - this.subscriptions.subscribe(topic); - metadata.addTopics(topics); + public void subscribe(String... 
topics) { + acquire(); + try { + log.debug("Subscribed to topic(s): {}", Utils.join(topics, ", ")); + for (String topic : topics) + this.subscriptions.subscribe(topic); + metadata.addTopics(topics); + } finally { + release(); + } } /** @@ -579,12 +651,16 @@ public class KafkaConsumer implements Consumer { * @param partitions Partitions to incrementally subscribe to */ @Override - public synchronized void subscribe(TopicPartition... partitions) { - ensureNotClosed(); - log.debug("Subscribed to partitions(s): {}", Utils.join(partitions, ", ")); - for (TopicPartition tp : partitions) { - this.subscriptions.subscribe(tp); - metadata.addTopics(tp.topic()); + public void subscribe(TopicPartition... partitions) { + acquire(); + try { + log.debug("Subscribed to partitions(s): {}", Utils.join(partitions, ", ")); + for (TopicPartition tp : partitions) { + this.subscriptions.subscribe(tp); + metadata.addTopics(tp.topic()); + } + } finally { + release(); } } @@ -594,12 +670,16 @@ public class KafkaConsumer implements Consumer { * * @param topics Topics to unsubscribe from */ - public synchronized void unsubscribe(String... topics) { - ensureNotClosed(); - log.debug("Unsubscribed from topic(s): {}", Utils.join(topics, ", ")); - // throw an exception if the topic was never subscribed to - for (String topic : topics) - this.subscriptions.unsubscribe(topic); + public void unsubscribe(String... topics) { + acquire(); + try { + log.debug("Unsubscribed from topic(s): {}", Utils.join(topics, ", ")); + // throw an exception if the topic was never subscribed to + for (String topic : topics) + this.subscriptions.unsubscribe(topic); + } finally { + release(); + } } /** @@ -608,12 +688,16 @@ public class KafkaConsumer implements Consumer { * * @param partitions Partitions to unsubscribe from */ - public synchronized void unsubscribe(TopicPartition... partitions) { - ensureNotClosed(); - log.debug("Unsubscribed from partitions(s): {}", Utils.join(partitions, ", ")); - // throw an exception if the partition was never subscribed to - for (TopicPartition partition : partitions) - this.subscriptions.unsubscribe(partition); + public void unsubscribe(TopicPartition... partitions) { + acquire(); + try { + log.debug("Unsubscribed from partitions(s): {}", Utils.join(partitions, ", ")); + // throw an exception if the partition was never subscribed to + for (TopicPartition partition : partitions) + this.subscriptions.unsubscribe(partition); + } finally { + release(); + } } /** @@ -625,17 +709,65 @@ public class KafkaConsumer implements Consumer { * rebalance, to consume data from that offset sequentially on every poll. If not, it will use the last checkpointed * offset using {@link #commit(Map, CommitType) commit(offsets, sync)} for the subscribed list of partitions. * - * @param timeout The time, in milliseconds, spent waiting in poll if data is not available. If 0, waits - * indefinitely. Must not be negative + * @param timeout The time, in milliseconds, spent waiting in poll if data is not available. If 0, returns + * immediately with any records available now. Must not be negative. * @return map of topic to records since the last fetch for the subscribed list of topics and partitions * * @throws NoOffsetForPartitionException If there is no stored offset for a subscribed partition and no automatic * offset reset policy has been configured. 
*/ @Override - public synchronized ConsumerRecords poll(long timeout) { - ensureNotClosed(); - long now = time.milliseconds(); + public ConsumerRecords poll(long timeout) { + acquire(); + try { + if (timeout < 0) + throw new IllegalArgumentException("Timeout must not be negative"); + + // Poll for new data until the timeout expires + long remaining = timeout; + while (remaining >= 0) { + long start = time.milliseconds(); + long pollTimeout = min(remaining, timeToNextCommit(start), coordinator.timeToNextHeartbeat(start)); + + Map>> records = pollOnce(pollTimeout, start); + long end = time.milliseconds(); + + if (!records.isEmpty()) { + // If data is available, then return it, but first send off the + // next round of fetches to enable pipelining while the user is + // handling the fetched records. + fetcher.initFetches(metadata.fetch(), end); + pollClient(0, end); + return new ConsumerRecords(records); + } + + remaining -= end - start; + + // Nothing was available, so we should backoff before retrying + if (remaining > 0) { + Utils.sleep(min(remaining, retryBackoffMs)); + remaining -= time.milliseconds() - end; + } + } + + return ConsumerRecords.empty(); + } finally { + release(); + } + } + + + /** + * Do one round of polling. In addition to checking for new data, this does any needed + * heart-beating, auto-commits, and offset updates. + * @param timeout The maximum time to block in the underlying poll + * @param now Current time in millis + * @return The fetched records (may be empty) + */ + private Map>> pollOnce(long timeout, long now) { + Cluster cluster = this.metadata.fetch(); + + // TODO: Sub-requests should take into account the poll timeout (KAFKA-1894) if (subscriptions.partitionsAutoAssigned()) { if (subscriptions.partitionAssignmentNeeded()) { @@ -650,26 +782,18 @@ public class KafkaConsumer implements Consumer { // fetch positions if we have partitions we're subscribed to that we // don't know the offset for if (!subscriptions.hasAllFetchPositions()) - updateFetchPositions(this.subscriptions.missingFetchPositions(), now); + updateFetchPositions(this.subscriptions.missingFetchPositions()); // maybe autocommit position if (shouldAutoCommit(now)) commit(CommitType.ASYNC); - /* - * initiate any needed fetches, then block for the timeout the user specified - */ - Cluster cluster = this.metadata.fetch(); + // Init any new fetches (won't resend pending fetches) fetcher.initFetches(cluster, now); - client.poll(timeout, now); - /* - * initiate a fetch request for any nodes that we just got a response from without blocking - */ - fetcher.initFetches(cluster, now); - client.poll(0, now); + pollClient(timeout, now); - return new ConsumerRecords(fetcher.fetchedRecords()); + return fetcher.fetchedRecords(); } /** @@ -679,7 +803,7 @@ public class KafkaConsumer implements Consumer { * rebalance and also on startup. As such, if you need to store offsets in anything other than Kafka, this API * should not be used. *

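For quick reference, a snippet illustrating the two commit modes documented in this block. It is not part of the patch; the "consumer" instance and the Map<TopicPartition, Long> form of the offsets argument are assumptions based on the surrounding code, and the usual java.util and org.apache.kafka.common imports are omitted.

    // Commit an explicit offset without blocking; a failure is not reported to the caller.
    Map<TopicPartition, Long> offsets = new HashMap<TopicPartition, Long>();
    offsets.put(new TopicPartition("my-topic", 0), 42L);
    consumer.commit(offsets, CommitType.ASYNC);

    // Commit everything consumed so far and block until the coordinator acknowledges it,
    // retrying on retriable errors.
    consumer.commit(CommitType.SYNC);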
    - * A non-blocking commit will attempt to commit offsets asychronously. No error will be thrown if the commit fails. + * A non-blocking commit will attempt to commit offsets asynchronously. No error will be thrown if the commit fails. * A blocking commit will wait for a response acknowledging the commit. In the event of an error it will retry until * the commit succeeds. * @@ -687,18 +811,20 @@ public class KafkaConsumer implements Consumer { * @param commitType Control whether the commit is blocking */ @Override - public synchronized void commit(final Map offsets, CommitType commitType) { - ensureNotClosed(); - log.debug("Committing offsets ({}): {} ", commitType.toString().toLowerCase(), offsets); + public void commit(final Map offsets, CommitType commitType) { + acquire(); + try { + log.debug("Committing offsets ({}): {} ", commitType.toString().toLowerCase(), offsets); - long now = time.milliseconds(); - this.lastCommitAttemptMs = now; + this.lastCommitAttemptMs = time.milliseconds(); - // commit the offsets with the coordinator - boolean syncCommit = commitType.equals(CommitType.SYNC); - if (!syncCommit) - this.subscriptions.needRefreshCommits(); - coordinator.commitOffsets(offsets, syncCommit, now); + // commit the offsets with the coordinator + if (commitType == CommitType.ASYNC) + this.subscriptions.needRefreshCommits(); + commitOffsets(offsets, commitType); + } finally { + release(); + } } /** @@ -711,9 +837,15 @@ public class KafkaConsumer implements Consumer { * @param commitType Whether or not the commit should block until it is acknowledged. */ @Override - public synchronized void commit(CommitType commitType) { - ensureNotClosed(); - commit(this.subscriptions.allConsumed(), commitType); + public void commit(CommitType commitType) { + acquire(); + try { + // Need defensive copy to ensure offsets are not removed before completion (e.g. in rebalance) + Map allConsumed = new HashMap(this.subscriptions.allConsumed()); + commit(allConsumed, commitType); + } finally { + release(); + } } /** @@ -722,35 +854,43 @@ public class KafkaConsumer implements Consumer { * you may lose data if this API is arbitrarily used in the middle of consumption, to reset the fetch offsets */ @Override - public synchronized void seek(TopicPartition partition, long offset) { - ensureNotClosed(); - log.debug("Seeking to offset {} for partition {}", offset, partition); - this.subscriptions.seek(partition, offset); + public void seek(TopicPartition partition, long offset) { + acquire(); + try { + log.debug("Seeking to offset {} for partition {}", offset, partition); + this.subscriptions.seek(partition, offset); + } finally { + release(); + } } /** * Seek to the first offset for each of the given partitions */ - public synchronized void seekToBeginning(TopicPartition... partitions) { - ensureNotClosed(); - Collection parts = partitions.length == 0 ? this.subscriptions.assignedPartitions() - : Arrays.asList(partitions); - for (TopicPartition tp : parts) { - // TODO: list offset call could be optimized by grouping by node - seek(tp, fetcher.offsetBefore(tp, EARLIEST_OFFSET_TIMESTAMP)); + public void seekToBeginning(TopicPartition... partitions) { + acquire(); + try { + Collection parts = partitions.length == 0 ? 
this.subscriptions.assignedPartitions() + : Arrays.asList(partitions); + for (TopicPartition tp : parts) + subscriptions.needOffsetReset(tp, OffsetResetStrategy.EARLIEST); + } finally { + release(); } } /** * Seek to the last offset for each of the given partitions */ - public synchronized void seekToEnd(TopicPartition... partitions) { - ensureNotClosed(); - Collection parts = partitions.length == 0 ? this.subscriptions.assignedPartitions() - : Arrays.asList(partitions); - for (TopicPartition tp : parts) { - // TODO: list offset call could be optimized by grouping by node - seek(tp, fetcher.offsetBefore(tp, LATEST_OFFSET_TIMESTAMP)); + public void seekToEnd(TopicPartition... partitions) { + acquire(); + try { + Collection parts = partitions.length == 0 ? this.subscriptions.assignedPartitions() + : Arrays.asList(partitions); + for (TopicPartition tp : parts) + subscriptions.needOffsetReset(tp, OffsetResetStrategy.LATEST); + } finally { + release(); } } @@ -762,16 +902,20 @@ public class KafkaConsumer implements Consumer { * @throws NoOffsetForPartitionException If a position hasn't been set for a given partition, and no reset policy is * available. */ - public synchronized long position(TopicPartition partition) { - ensureNotClosed(); - if (!this.subscriptions.assignedPartitions().contains(partition)) - throw new IllegalArgumentException("You can only check the position for partitions assigned to this consumer."); - Long offset = this.subscriptions.consumed(partition); - if (offset == null) { - updateFetchPositions(Collections.singleton(partition), time.milliseconds()); - return this.subscriptions.consumed(partition); - } else { - return offset; + public long position(TopicPartition partition) { + acquire(); + try { + if (!this.subscriptions.assignedPartitions().contains(partition)) + throw new IllegalArgumentException("You can only check the position for partitions assigned to this consumer."); + Long offset = this.subscriptions.consumed(partition); + if (offset == null) { + updateFetchPositions(Collections.singleton(partition)); + return this.subscriptions.consumed(partition); + } else { + return offset; + } + } finally { + release(); } } @@ -788,22 +932,26 @@ public class KafkaConsumer implements Consumer { * partition. 
*/ @Override - public synchronized long committed(TopicPartition partition) { - ensureNotClosed(); - Set partitionsToFetch; - if (subscriptions.assignedPartitions().contains(partition)) { + public long committed(TopicPartition partition) { + acquire(); + try { + Set partitionsToFetch; + if (subscriptions.assignedPartitions().contains(partition)) { + Long committed = this.subscriptions.committed(partition); + if (committed != null) + return committed; + partitionsToFetch = subscriptions.assignedPartitions(); + } else { + partitionsToFetch = Collections.singleton(partition); + } + refreshCommittedOffsets(partitionsToFetch); Long committed = this.subscriptions.committed(partition); - if (committed != null) - return committed; - partitionsToFetch = subscriptions.assignedPartitions(); - } else { - partitionsToFetch = Collections.singleton(partition); + if (committed == null) + throw new NoOffsetForPartitionException("No offset has been committed for partition " + partition); + return committed; + } finally { + release(); } - refreshCommittedOffsets(partitionsToFetch, time.milliseconds()); - Long committed = this.subscriptions.committed(partition); - if (committed == null) - throw new NoOffsetForPartitionException("No offset has been committed for partition " + partition); - return committed; } /** @@ -823,19 +971,40 @@ public class KafkaConsumer implements Consumer { */ @Override public List partitionsFor(String topic) { - Cluster cluster = this.metadata.fetch(); - List parts = cluster.partitionsForTopic(topic); - if (parts == null) { - metadata.add(topic); - awaitMetadataUpdate(); - parts = metadata.fetch().partitionsForTopic(topic); + acquire(); + try { + Cluster cluster = this.metadata.fetch(); + List parts = cluster.partitionsForTopic(topic); + if (parts == null) { + metadata.add(topic); + awaitMetadataUpdate(); + parts = metadata.fetch().partitionsForTopic(topic); + } + return parts; + } finally { + release(); + } + } + + @Override + public void close() { + acquire(); + try { + if (closed) return; + close(false); + } finally { + release(); } - return parts; } + /** + * Wakeup the consumer. This method is thread-safe and is useful in particular to abort a long poll. + * The thread which is blocking in an operation will throw {@link ConsumerWakeupException}. 
+ */ @Override - public synchronized void close() { - close(false); + public void wakeup() { + this.wakeup.set(true); + this.client.wakeup(); } private void close(boolean swallowException) { @@ -857,6 +1026,15 @@ public class KafkaConsumer implements Consumer { return this.autoCommit && this.lastCommitAttemptMs <= now - this.autoCommitIntervalMs; } + private long timeToNextCommit(long now) { + if (!this.autoCommit) + return Long.MAX_VALUE; + long timeSinceLastCommit = now - this.lastCommitAttemptMs; + if (timeSinceLastCommit > this.autoCommitIntervalMs) + return 0; + return this.autoCommitIntervalMs - timeSinceLastCommit; + } + /** * Request a metadata update and wait until it has occurred */ @@ -864,7 +1042,7 @@ public class KafkaConsumer implements Consumer { int version = this.metadata.requestUpdate(); do { long now = time.milliseconds(); - this.client.poll(this.retryBackoffMs, now); + this.pollClient(this.retryBackoffMs, now); } while (this.metadata.version() == version); } @@ -882,8 +1060,7 @@ public class KafkaConsumer implements Consumer { } // get new assigned partitions from the coordinator - this.subscriptions.changePartitionAssignment(coordinator.assignPartitions( - new ArrayList(this.subscriptions.subscribedTopics()), now)); + assignPartitions(); // execute the user's callback after rebalance log.debug("Setting newly assigned partitions {}", this.subscriptions.assignedPartitions()); @@ -900,25 +1077,73 @@ public class KafkaConsumer implements Consumer { * or reset it using the offset reset policy the user has configured. * * @param partitions The partitions that needs updating fetch positions - * @param now The current time * @throws org.apache.kafka.clients.consumer.NoOffsetForPartitionException If no offset is stored for a given partition and no offset reset policy is * defined */ - private void updateFetchPositions(Set partitions, long now) { + private void updateFetchPositions(Set partitions) { // first refresh the committed positions in case they are not up-to-date - refreshCommittedOffsets(partitions, now); + refreshCommittedOffsets(partitions); // reset the fetch position to the committed position for (TopicPartition tp : partitions) { - if (subscriptions.fetched(tp) == null) { - if (subscriptions.committed(tp) == null) { - // if the committed position is unknown reset the position - fetcher.resetOffset(tp); - } else { - log.debug("Resetting offset for partition {} to the committed offset {}", - tp, subscriptions.committed(tp)); - subscriptions.seek(tp, subscriptions.committed(tp)); - } + // Skip if we already have a fetch position + if (subscriptions.fetched(tp) != null) + continue; + + // TODO: If there are several offsets to reset, we could submit offset requests in parallel + if (subscriptions.isOffsetResetNeeded(tp)) { + resetOffset(tp); + } else if (subscriptions.committed(tp) == null) { + // There's no committed position, so we need to reset with the default strategy + subscriptions.needOffsetReset(tp); + resetOffset(tp); + } else { + log.debug("Resetting offset for partition {} to the committed offset {}", + tp, subscriptions.committed(tp)); + subscriptions.seek(tp, subscriptions.committed(tp)); + } + } + } + + /** + * Reset offsets for the given partition using the offset reset strategy. 
+ * + * @param partition The given partition that needs reset offset + * @throws org.apache.kafka.clients.consumer.NoOffsetForPartitionException If no offset reset strategy is defined + */ + private void resetOffset(TopicPartition partition) { + OffsetResetStrategy strategy = subscriptions.resetStrategy(partition); + final long timestamp; + if (strategy == OffsetResetStrategy.EARLIEST) + timestamp = EARLIEST_OFFSET_TIMESTAMP; + else if (strategy == OffsetResetStrategy.LATEST) + timestamp = LATEST_OFFSET_TIMESTAMP; + else + throw new NoOffsetForPartitionException("No offset is set and no reset policy is defined"); + + log.debug("Resetting offset for partition {} to {} offset.", partition, strategy.name().toLowerCase()); + long offset = listOffset(partition, timestamp); + this.subscriptions.seek(partition, offset); + } + + /** + * Fetch a single offset before the given timestamp for the partition. + * + * @param partition The partition that needs fetching offset. + * @param timestamp The timestamp for fetching offset. + * @return The offset of the message that is published before the given timestamp + */ + private long listOffset(TopicPartition partition, long timestamp) { + while (true) { + RequestFuture future = fetcher.listOffset(partition, timestamp); + + if (!future.isDone()) + pollFuture(future, requestTimeoutMs); + + if (future.isDone()) { + if (future.succeeded()) + return future.value(); + handleRequestFailure(future); } } } @@ -926,13 +1151,13 @@ public class KafkaConsumer implements Consumer { /** * Refresh the committed offsets for given set of partitions and update the cache */ - private void refreshCommittedOffsets(Set partitions, long now) { + private void refreshCommittedOffsets(Set partitions) { // we only need to fetch latest committed offset from coordinator if there // is some commit process in progress, otherwise our current // committed cache is up-to-date if (subscriptions.refreshCommitsNeeded()) { // contact coordinator to fetch committed offsets - Map offsets = coordinator.fetchOffsets(partitions, now); + Map offsets = fetchCommittedOffsets(partitions); // update the position with the offsets for (Map.Entry entry : offsets.entrySet()) { @@ -942,6 +1167,183 @@ public class KafkaConsumer implements Consumer { } } + /** + * Block until we have received a partition assignment from the coordinator. + */ + private void assignPartitions() { + // Ensure that there are no pending requests to the coordinator. This is important + // in particular to avoid resending a pending JoinGroup request. + awaitCoordinatorInFlightRequests(); + + while (subscriptions.partitionAssignmentNeeded()) { + RequestFuture future = coordinator.assignPartitions(time.milliseconds()); + + // Block indefinitely for the join group request (which can take as long as a session timeout) + if (!future.isDone()) + pollFuture(future); + + if (future.failed()) + handleRequestFailure(future); + } + } + + /** + * Block until the coordinator for this group is known. + */ + private void ensureCoordinatorKnown() { + while (coordinator.coordinatorUnknown()) { + RequestFuture future = coordinator.discoverConsumerCoordinator(); + + if (!future.isDone()) + pollFuture(future, requestTimeoutMs); + + if (future.failed()) + handleRequestFailure(future); + } + } + + /** + * Block until any pending requests to the coordinator have been handled. 
+ */ + public void awaitCoordinatorInFlightRequests() { + while (coordinator.hasInFlightRequests()) { + long now = time.milliseconds(); + pollClient(-1, now); + } + } + + /** + * Lookup the committed offsets for a set of partitions. This will block until the coordinator has + * responded to the offset fetch request. + * @param partitions List of partitions to get offsets for + * @return Map from partition to its respective offset + */ + private Map fetchCommittedOffsets(Set partitions) { + while (true) { + long now = time.milliseconds(); + RequestFuture> future = coordinator.fetchOffsets(partitions, now); + + if (!future.isDone()) + pollFuture(future, requestTimeoutMs); + + if (future.isDone()) { + if (future.succeeded()) + return future.value(); + handleRequestFailure(future); + } + } + } + + /** + * Commit offsets. This call blocks (regardless of commitType) until the coordinator + * can receive the commit request. Once the request has been made, however, only the + * synchronous commits will wait for a successful response from the coordinator. + * @param offsets Offsets to commit. + * @param commitType Commit policy + */ + private void commitOffsets(Map offsets, CommitType commitType) { + if (commitType == CommitType.ASYNC) { + commitOffsetsAsync(offsets); + } else { + commitOffsetsSync(offsets); + } + } + + private void commitOffsetsAsync(Map offsets) { + while (true) { + long now = time.milliseconds(); + RequestFuture future = coordinator.commitOffsets(offsets, now); + + if (!future.isDone() || future.succeeded()) + return; + + handleRequestFailure(future); + } + } + + private void commitOffsetsSync(Map offsets) { + while (true) { + long now = time.milliseconds(); + RequestFuture future = coordinator.commitOffsets(offsets, now); + + if (!future.isDone()) + pollFuture(future, requestTimeoutMs); + + if (future.isDone()) { + if (future.succeeded()) + return; + else + handleRequestFailure(future); + } + } + } + + private void handleRequestFailure(RequestFuture future) { + if (future.hasException()) + throw future.exception(); + + switch (future.retryAction()) { + case BACKOFF: + Utils.sleep(retryBackoffMs); + break; + case POLL: + pollClient(retryBackoffMs, time.milliseconds()); + break; + case FIND_COORDINATOR: + ensureCoordinatorKnown(); + break; + case REFRESH_METADATA: + awaitMetadataUpdate(); + break; + case NOOP: + // Do nothing (retry now) + } + } + + /** + * Poll until a result is ready or timeout expires + * @param future The future to poll for + * @param timeout The time in milliseconds to wait for the result + */ + private void pollFuture(RequestFuture future, long timeout) { + // TODO: Update this code for KAFKA-2120, which adds request timeout to NetworkClient + // In particular, we must ensure that "timed out" requests will not have their callbacks + // invoked at a later time. + long remaining = timeout; + while (!future.isDone() && remaining >= 0) { + long start = time.milliseconds(); + pollClient(remaining, start); + if (future.isDone()) return; + remaining -= time.milliseconds() - start; + } + } + + /** + * Poll indefinitely until the result is ready. + * @param future The future to poll for. + */ + private void pollFuture(RequestFuture future) { + while (!future.isDone()) { + long now = time.milliseconds(); + pollClient(-1, now); + } + } + + /** + * Poll for IO. 
+ * @param timeout The maximum time to wait for IO to become available + * @param now The current time in milliseconds + * @throws ConsumerWakeupException if {@link #wakeup()} is invoked while the poll is active + */ + private void pollClient(long timeout, long now) { + this.client.poll(timeout, now); + + if (wakeup.get()) { + wakeup.set(false); + throw new ConsumerWakeupException(); + } + } + /* * Check that the consumer hasn't been closed. */ @@ -949,4 +1351,27 @@ public class KafkaConsumer implements Consumer { if (this.closed) throw new IllegalStateException("This consumer has already been closed."); } + + /** + * Acquire the light lock protecting this consumer from multi-threaded access. Instead of blocking + * when the lock is not available, however, we just throw an exception (since multi-threaded usage is not + * supported). + * @throws IllegalStateException if the consumer has been closed + * @throws ConcurrentModificationException if another thread already has the lock + */ + private void acquire() { + ensureNotClosed(); + Long threadId = Thread.currentThread().getId(); + if (!threadId.equals(currentThread.get()) && !currentThread.compareAndSet(null, threadId)) + throw new ConcurrentModificationException("KafkaConsumer is not safe for multi-threaded access"); + refcount.incrementAndGet(); + } + + /** + * Release the light lock protecting the consumer from multi-threaded access. + */ + private void release() { + if (refcount.decrementAndGet() == 0) + currentThread.set(null); + } } diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/MockConsumer.java b/clients/src/main/java/org/apache/kafka/clients/consumer/MockConsumer.java index f50da82..46e26a6 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/MockConsumer.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/MockConsumer.java @@ -40,8 +40,8 @@ public class MockConsumer implements Consumer { private Map>> records; private boolean closed; - public MockConsumer() { - this.subscriptions = new SubscriptionState(); + public MockConsumer(OffsetResetStrategy offsetResetStrategy) { + this.subscriptions = new SubscriptionState(offsetResetStrategy); this.partitions = new HashMap>(); this.records = new HashMap>>(); this.closed = false; @@ -175,6 +175,11 @@ public class MockConsumer implements Consumer { this.closed = true; } + @Override + public void wakeup() { + + } + private void ensureNotClosed() { if (this.closed) throw new IllegalStateException("This consumer has already been closed."); diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/OffsetResetStrategy.java b/clients/src/main/java/org/apache/kafka/clients/consumer/OffsetResetStrategy.java new file mode 100644 index 0000000..542da7f --- /dev/null +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/OffsetResetStrategy.java @@ -0,0 +1,17 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE + * file distributed with this work for additional information regarding copyright ownership. The ASF licenses this file + * to You under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the + * License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on + * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + */ +package org.apache.kafka.clients.consumer; + +public enum OffsetResetStrategy { + LATEST, EARLIEST, NONE +} diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/Coordinator.java b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/Coordinator.java index 41cb945..c1c8172 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/Coordinator.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/Coordinator.java @@ -15,7 +15,6 @@ package org.apache.kafka.clients.consumer.internals; import org.apache.kafka.clients.ClientRequest; import org.apache.kafka.clients.ClientResponse; import org.apache.kafka.clients.KafkaClient; -import org.apache.kafka.clients.Metadata; import org.apache.kafka.clients.RequestCompletionHandler; import org.apache.kafka.common.KafkaException; import org.apache.kafka.common.MetricName; @@ -57,7 +56,7 @@ import java.util.Set; import java.util.concurrent.TimeUnit; /** - * This class manage the coordination process with the consumer coordinator. + * This class manages the coordination process with the consumer coordinator. */ public final class Coordinator { @@ -67,13 +66,11 @@ public final class Coordinator { private final Time time; private final String groupId; - private final Metadata metadata; private final Heartbeat heartbeat; private final int sessionTimeoutMs; private final String assignmentStrategy; private final SubscriptionState subscriptions; private final CoordinatorMetrics sensors; - private final long retryBackoffMs; private Node consumerCoordinator; private String consumerId; private int generation; @@ -83,10 +80,8 @@ public final class Coordinator { */ public Coordinator(KafkaClient client, String groupId, - long retryBackoffMs, int sessionTimeoutMs, String assignmentStrategy, - Metadata metadata, SubscriptionState subscriptions, Metrics metrics, String metricGrpPrefix, @@ -98,10 +93,8 @@ public final class Coordinator { this.generation = -1; this.consumerId = JoinGroupRequest.UNKNOWN_CONSUMER_ID; this.groupId = groupId; - this.metadata = metadata; this.consumerCoordinator = null; this.subscriptions = subscriptions; - this.retryBackoffMs = retryBackoffMs; this.sessionTimeoutMs = sessionTimeoutMs; this.assignmentStrategy = assignmentStrategy; this.heartbeat = new Heartbeat(this.sessionTimeoutMs, time.milliseconds()); @@ -109,84 +102,110 @@ public final class Coordinator { } /** - * Assign partitions for the subscribed topics. - * - * @param subscribedTopics The subscribed topics list - * @param now The current time - * @return The assigned partition info + * Send a request to get a new partition assignment. This is a non-blocking call which sends + * a JoinGroup request to the coordinator (if it is available). The returned future must + * be polled to see if the request completed successfully. + * @param now The current time in milliseconds + * @return A request future whose completion indicates the result of the JoinGroup request. 
*/ - public List assignPartitions(List subscribedTopics, long now) { + public RequestFuture assignPartitions(final long now) { + final RequestFuture future = newCoordinatorRequestFuture(now); + if (future.isDone()) return future; // send a join group request to the coordinator + List subscribedTopics = new ArrayList(subscriptions.subscribedTopics()); log.debug("(Re-)joining group {} with subscribed topics {}", groupId, subscribedTopics); - // repeat processing the response until succeed or fatal error - do { - JoinGroupRequest request = new JoinGroupRequest(groupId, + JoinGroupRequest request = new JoinGroupRequest(groupId, this.sessionTimeoutMs, subscribedTopics, this.consumerId, this.assignmentStrategy); - ClientResponse resp = this.blockingCoordinatorRequest(ApiKeys.JOIN_GROUP, request.toStruct(), null, now); - JoinGroupResponse response = new JoinGroupResponse(resp.responseBody()); - short errorCode = response.errorCode(); + // create the request for the coordinator + log.debug("Issuing request ({}: {}) to coordinator {}", ApiKeys.JOIN_GROUP, request, this.consumerCoordinator.id()); + + RequestCompletionHandler completionHandler = new RequestCompletionHandler() { + @Override + public void onComplete(ClientResponse resp) { + handleJoinResponse(resp, future); + } + }; + + sendCoordinator(ApiKeys.JOIN_GROUP, request.toStruct(), completionHandler, now); + return future; + } + + private void handleJoinResponse(ClientResponse response, RequestFuture future) { + if (response.wasDisconnected()) { + handleCoordinatorDisconnect(response); + future.retryWithNewCoordinator(); + } else { + // process the response + JoinGroupResponse joinResponse = new JoinGroupResponse(response.responseBody()); + short errorCode = joinResponse.errorCode(); if (errorCode == Errors.NONE.code()) { - this.consumerId = response.consumerId(); - this.generation = response.generationId(); + Coordinator.this.consumerId = joinResponse.consumerId(); + Coordinator.this.generation = joinResponse.generationId(); // set the flag to refresh last committed offsets - this.subscriptions.needRefreshCommits(); + subscriptions.needRefreshCommits(); log.debug("Joined group: {}", response); // record re-assignment time - this.sensors.partitionReassignments.record(time.milliseconds() - now); + this.sensors.partitionReassignments.record(response.requestLatencyMs()); - // return assigned partitions - return response.assignedPartitions(); + // update partition assignment + subscriptions.changePartitionAssignment(joinResponse.assignedPartitions()); + future.complete(null); } else if (errorCode == Errors.UNKNOWN_CONSUMER_ID.code()) { // reset the consumer id and retry immediately - this.consumerId = JoinGroupRequest.UNKNOWN_CONSUMER_ID; + Coordinator.this.consumerId = JoinGroupRequest.UNKNOWN_CONSUMER_ID; log.info("Attempt to join group {} failed due to unknown consumer id, resetting and retrying.", - groupId); + groupId); + + future.retryNow(); } else if (errorCode == Errors.CONSUMER_COORDINATOR_NOT_AVAILABLE.code() || errorCode == Errors.NOT_COORDINATOR_FOR_CONSUMER.code()) { // re-discover the coordinator and retry with backoff coordinatorDead(); - Utils.sleep(this.retryBackoffMs); - log.info("Attempt to join group {} failed due to obsolete coordinator information, retrying.", - groupId); + groupId); + future.retryWithNewCoordinator(); } else if (errorCode == Errors.UNKNOWN_PARTITION_ASSIGNMENT_STRATEGY.code() || errorCode == Errors.INCONSISTENT_PARTITION_ASSIGNMENT_STRATEGY.code() || errorCode == Errors.INVALID_SESSION_TIMEOUT.code()) { // 
log the error and re-throw the exception + KafkaException e = Errors.forCode(errorCode).exception(); log.error("Attempt to join group {} failed due to: {}", - groupId, Errors.forCode(errorCode).exception().getMessage()); - Errors.forCode(errorCode).maybeThrow(); + groupId, e.getMessage()); + future.raise(e); } else { // unexpected error, throw the exception - throw new KafkaException("Unexpected error in join group response: " - + Errors.forCode(response.errorCode()).exception().getMessage()); + future.raise(new KafkaException("Unexpected error in join group response: " + + Errors.forCode(joinResponse.errorCode()).exception().getMessage())); } - } while (true); + } } /** - * Commit offsets for the specified list of topics and partitions. - * - * A non-blocking commit will attempt to commit offsets asychronously. No error will be thrown if the commit fails. - * A blocking commit will wait for a response acknowledging the commit. In the event of an error it will retry until - * the commit succeeds. + * Commit offsets for the specified list of topics and partitions. This is a non-blocking call + * which returns a request future that can be polled in the case of a synchronous commit or ignored in the + * asynchronous case. * * @param offsets The list of offsets per partition that should be committed. - * @param blocking Control whether the commit is blocking * @param now The current time + * @return A request future whose value indicates whether the commit was successful or not */ - public void commitOffsets(final Map offsets, boolean blocking, long now) { - if (!offsets.isEmpty()) { + public RequestFuture commitOffsets(final Map offsets, long now) { + final RequestFuture future = newCoordinatorRequestFuture(now); + if (future.isDone()) return future; + + if (offsets.isEmpty()) { + future.complete(null); + } else { // create the offset commit request Map offsetData; offsetData = new HashMap(offsets.size()); @@ -198,52 +217,63 @@ public final class Coordinator { OffsetCommitRequest.DEFAULT_RETENTION_TIME, offsetData); - // send request and possibly wait for response if it is blocking - RequestCompletionHandler handler = new CommitOffsetCompletionHandler(offsets); + RequestCompletionHandler handler = new OffsetCommitCompletionHandler(offsets, future); + sendCoordinator(ApiKeys.OFFSET_COMMIT, req.toStruct(), handler, now); + } - if (blocking) { - boolean done; - do { - ClientResponse response = blockingCoordinatorRequest(ApiKeys.OFFSET_COMMIT, req.toStruct(), handler, now); + return future; + } - // check for errors - done = true; - OffsetCommitResponse commitResponse = new OffsetCommitResponse(response.responseBody()); - for (short errorCode : commitResponse.responseData().values()) { - if (errorCode != Errors.NONE.code()) - done = false; - } - if (!done) { - log.debug("Error in offset commit, backing off for {} ms before retrying again.", - this.retryBackoffMs); - Utils.sleep(this.retryBackoffMs); - } - } while (!done); - } else { - this.client.send(initiateCoordinatorRequest(ApiKeys.OFFSET_COMMIT, req.toStruct(), handler, now)); - } + private RequestFuture newCoordinatorRequestFuture(long now) { + if (coordinatorUnknown()) + return RequestFuture.newCoordinatorNeeded(); + + if (client.ready(this.consumerCoordinator, now)) + // We have an open connection and we're ready to send + return new RequestFuture(); + + if (this.client.connectionFailed(this.consumerCoordinator)) { + coordinatorDead(); + return RequestFuture.newCoordinatorNeeded(); } + + // The connection has been initiated, so we need to 
poll to finish it + return RequestFuture.pollNeeded(); } /** - * Fetch the committed offsets of the given set of partitions. + * Fetch the committed offsets for a set of partitions. This is a non-blocking call. The + * returned future can be polled to get the actual offsets returned from the broker. * - * @param partitions The list of partitions which need to ask for committed offsets - * @param now The current time - * @return The fetched offset values + * @param partitions The set of partitions to get offsets for. + * @param now The current time in milliseconds + * @return A request future containing the committed offsets. */ - public Map fetchOffsets(Set partitions, long now) { - log.debug("Fetching committed offsets for partitions: " + Utils.join(partitions, ", ")); - - while (true) { - // construct the request - OffsetFetchRequest request = new OffsetFetchRequest(this.groupId, new ArrayList(partitions)); + public RequestFuture> fetchOffsets(Set partitions, long now) { + final RequestFuture> future = newCoordinatorRequestFuture(now); + if (future.isDone()) return future; - // send the request and block on waiting for response - ClientResponse resp = this.blockingCoordinatorRequest(ApiKeys.OFFSET_FETCH, request.toStruct(), null, now); + log.debug("Fetching committed offsets for partitions: " + Utils.join(partitions, ", ")); + // construct the request + OffsetFetchRequest request = new OffsetFetchRequest(this.groupId, new ArrayList(partitions)); + + // send the request with a callback + RequestCompletionHandler completionHandler = new RequestCompletionHandler() { + @Override + public void onComplete(ClientResponse resp) { + handleOffsetFetchResponse(resp, future); + } + }; + sendCoordinator(ApiKeys.OFFSET_FETCH, request.toStruct(), completionHandler, now); + return future; + } + private void handleOffsetFetchResponse(ClientResponse resp, RequestFuture> future) { + if (resp.wasDisconnected()) { + handleCoordinatorDisconnect(resp); + future.retryWithNewCoordinator(); + } else { // parse the response to get the offsets - boolean offsetsReady = true; OffsetFetchResponse response = new OffsetFetchResponse(resp.responseBody()); Map offsets = new HashMap(response.responseData().size()); for (Map.Entry entry : response.responseData().entrySet()) { @@ -251,23 +281,22 @@ public final class Coordinator { OffsetFetchResponse.PartitionData data = entry.getValue(); if (data.hasError()) { log.debug("Error fetching offset for topic-partition {}: {}", tp, Errors.forCode(data.errorCode) - .exception() - .getMessage()); + .exception() + .getMessage()); if (data.errorCode == Errors.OFFSET_LOAD_IN_PROGRESS.code()) { // just retry - offsetsReady = false; - Utils.sleep(this.retryBackoffMs); + future.retryAfterBackoff(); } else if (data.errorCode == Errors.NOT_COORDINATOR_FOR_CONSUMER.code()) { // re-discover the coordinator and retry coordinatorDead(); - offsetsReady = false; - Utils.sleep(this.retryBackoffMs); - } else if (data.errorCode == Errors.UNKNOWN_TOPIC_OR_PARTITION.code()) { - // just ignore this partition - log.debug("Unknown topic or partition for " + tp); + future.retryWithNewCoordinator(); + } else if (data.errorCode == Errors.UNKNOWN_CONSUMER_ID.code() + || data.errorCode == Errors.ILLEGAL_GENERATION.code()) { + // need to re-join group + subscriptions.needReassignment(); } else { - throw new KafkaException("Unexpected error in fetch offset response: " - + Errors.forCode(data.errorCode).exception().getMessage()); + future.raise(new KafkaException("Unexpected error in fetch offset response: " + + 
Errors.forCode(data.errorCode).exception().getMessage())); } } else if (data.offset >= 0) { // record the position with the offset (-1 indicates no committed offset to fetch) @@ -277,8 +306,8 @@ public final class Coordinator { } } - if (offsetsReady) - return offsets; + if (!future.isDone()) + future.complete(offsets); } } @@ -288,124 +317,105 @@ public final class Coordinator { * @param now The current time */ public void maybeHeartbeat(long now) { - if (heartbeat.shouldHeartbeat(now)) { + if (heartbeat.shouldHeartbeat(now) && coordinatorReady(now)) { HeartbeatRequest req = new HeartbeatRequest(this.groupId, this.generation, this.consumerId); - this.client.send(initiateCoordinatorRequest(ApiKeys.HEARTBEAT, req.toStruct(), new HeartbeatCompletionHandler(), now)); + sendCoordinator(ApiKeys.HEARTBEAT, req.toStruct(), new HeartbeatCompletionHandler(), now); this.heartbeat.sentHeartbeat(now); } } - public boolean coordinatorUnknown() { - return this.consumerCoordinator == null; - } - /** - * Repeatedly attempt to send a request to the coordinator until a response is received (retry if we are - * disconnected). Note that this means any requests sent this way must be idempotent. - * - * @return The response + * Get the time until the next heartbeat is needed. + * @param now The current time + * @return The duration in milliseconds before the next heartbeat will be needed. */ - private ClientResponse blockingCoordinatorRequest(ApiKeys api, - Struct request, - RequestCompletionHandler handler, - long now) { - while (true) { - ClientRequest coordinatorRequest = initiateCoordinatorRequest(api, request, handler, now); - ClientResponse coordinatorResponse = sendAndReceive(coordinatorRequest, now); - if (coordinatorResponse.wasDisconnected()) { - handleCoordinatorDisconnect(coordinatorResponse); - Utils.sleep(this.retryBackoffMs); - } else { - return coordinatorResponse; - } - } + public long timeToNextHeartbeat(long now) { + return heartbeat.timeToNextHeartbeat(now); } /** - * Ensure the consumer coordinator is known and we have a ready connection to it. + * Check whether the coordinator has any in-flight requests. + * @return true if the coordinator has pending requests. */ - private void ensureCoordinatorReady() { - while (true) { - if (this.consumerCoordinator == null) - discoverCoordinator(); - - while (true) { - boolean ready = this.client.ready(this.consumerCoordinator, time.milliseconds()); - if (ready) { - return; - } else { - log.debug("No connection to coordinator, attempting to connect."); - this.client.poll(this.retryBackoffMs, time.milliseconds()); + public boolean hasInFlightRequests() { + return !coordinatorUnknown() && client.inFlightRequestCount(consumerCoordinator.idString()) > 0; + } - // if the coordinator connection has failed, we need to - // break the inner loop to re-discover the coordinator - if (this.client.connectionFailed(this.consumerCoordinator)) { - log.debug("Coordinator connection failed. Attempting to re-discover."); - coordinatorDead(); - break; - } - } - } - } + public boolean coordinatorUnknown() { + return this.consumerCoordinator == null; } - /** - * Mark the current coordinator as dead. 
- */ - private void coordinatorDead() { - if (this.consumerCoordinator != null) { - log.info("Marking the coordinator {} dead.", this.consumerCoordinator.id()); - this.consumerCoordinator = null; - } + private boolean coordinatorReady(long now) { + return !coordinatorUnknown() && this.client.ready(this.consumerCoordinator, now); } /** - * Keep discovering the consumer coordinator until it is found. + * Discover the current coordinator for the consumer group. Sends a ConsumerMetadata request to + * one of the brokers. The returned future should be polled to get the result of the request. + * @return A request future which indicates the completion of the metadata request */ - private void discoverCoordinator() { - while (this.consumerCoordinator == null) { - log.debug("No coordinator known, attempting to discover one."); - Node coordinator = fetchConsumerCoordinator(); - - if (coordinator == null) { - log.debug("No coordinator found, backing off."); - Utils.sleep(this.retryBackoffMs); + public RequestFuture discoverConsumerCoordinator() { + // initiate the consumer metadata request + // find a node to ask about the coordinator + long now = time.milliseconds(); + Node node = this.client.leastLoadedNode(now); + + if (node == null) { + return RequestFuture.metadataRefreshNeeded(); + } else if (!this.client.ready(node, now)) { + if (this.client.connectionFailed(node)) { + return RequestFuture.metadataRefreshNeeded(); } else { - log.debug("Found coordinator: " + coordinator); - this.consumerCoordinator = coordinator; + return RequestFuture.pollNeeded(); } + } else { + final RequestFuture future = new RequestFuture(); + + // create a consumer metadata request + log.debug("Issuing consumer metadata request to broker {}", node.id()); + ConsumerMetadataRequest metadataRequest = new ConsumerMetadataRequest(this.groupId); + RequestCompletionHandler completionHandler = new RequestCompletionHandler() { + @Override + public void onComplete(ClientResponse resp) { + handleConsumerMetadataResponse(resp, future); + } + }; + send(node, ApiKeys.CONSUMER_METADATA, metadataRequest.toStruct(), completionHandler, now); + return future; } } - /** - * Get the current consumer coordinator information via consumer metadata request. 
- * - * @return the consumer coordinator node - */ - private Node fetchConsumerCoordinator() { - - // initiate the consumer metadata request - ClientRequest request = initiateConsumerMetadataRequest(); - - // send the request and wait for its response - ClientResponse response = sendAndReceive(request, request.createdTime()); + private void handleConsumerMetadataResponse(ClientResponse resp, RequestFuture future) { + log.debug("Consumer metadata response {}", resp); // parse the response to get the coordinator info if it is not disconnected, // otherwise we need to request metadata update - if (!response.wasDisconnected()) { - ConsumerMetadataResponse consumerMetadataResponse = new ConsumerMetadataResponse(response.responseBody()); + if (resp.wasDisconnected()) { + future.retryAfterMetadataRefresh(); + } else { + ConsumerMetadataResponse consumerMetadataResponse = new ConsumerMetadataResponse(resp.responseBody()); // use MAX_VALUE - node.id as the coordinator id to mimic separate connections // for the coordinator in the underlying network client layer // TODO: this needs to be better handled in KAFKA-1935 - if (consumerMetadataResponse.errorCode() == Errors.NONE.code()) - return new Node(Integer.MAX_VALUE - consumerMetadataResponse.node().id(), - consumerMetadataResponse.node().host(), - consumerMetadataResponse.node().port()); - } else { - this.metadata.requestUpdate(); + if (consumerMetadataResponse.errorCode() == Errors.NONE.code()) { + this.consumerCoordinator = new Node(Integer.MAX_VALUE - consumerMetadataResponse.node().id(), + consumerMetadataResponse.node().host(), + consumerMetadataResponse.node().port()); + future.complete(null); + } else { + future.retryAfterBackoff(); + } } + } - return null; + /** + * Mark the current coordinator as dead. + */ + private void coordinatorDead() { + if (this.consumerCoordinator != null) { + log.info("Marking the coordinator {} dead.", this.consumerCoordinator.id()); + this.consumerCoordinator = null; + } } /** @@ -414,79 +424,23 @@ public final class Coordinator { private void handleCoordinatorDisconnect(ClientResponse response) { int correlation = response.request().request().header().correlationId(); log.debug("Cancelled request {} with correlation id {} due to coordinator {} being disconnected", - response.request(), - correlation, - response.request().request().destination()); + response.request(), + correlation, + response.request().request().destination()); // mark the coordinator as dead coordinatorDead(); } - /** - * Initiate a consumer metadata request to the least loaded node. 
- * - * @return The created request - */ - private ClientRequest initiateConsumerMetadataRequest() { - - // find a node to ask about the coordinator - Node node = this.client.leastLoadedNode(time.milliseconds()); - while (node == null || !this.client.ready(node, time.milliseconds())) { - long now = time.milliseconds(); - this.client.poll(this.retryBackoffMs, now); - node = this.client.leastLoadedNode(now); - - // if there is no ready node, backoff before retry - if (node == null) - Utils.sleep(this.retryBackoffMs); - } - - // create a consumer metadata request - log.debug("Issuing consumer metadata request to broker {}", node.id()); - ConsumerMetadataRequest request = new ConsumerMetadataRequest(this.groupId); - RequestSend send = new RequestSend(node.idString(), - this.client.nextRequestHeader(ApiKeys.CONSUMER_METADATA), - request.toStruct()); - long now = time.milliseconds(); - return new ClientRequest(now, true, send, null); + private void sendCoordinator(ApiKeys api, Struct request, RequestCompletionHandler handler, long now) { + send(this.consumerCoordinator, api, request, handler, now); } - /** - * Initiate a request to the coordinator. - */ - private ClientRequest initiateCoordinatorRequest(ApiKeys api, Struct request, RequestCompletionHandler handler, long now) { - - // first make sure the coordinator is known and ready - ensureCoordinatorReady(); - - // create the request for the coordinator - log.debug("Issuing request ({}: {}) to coordinator {}", api, request, this.consumerCoordinator.id()); - + private void send(Node node, ApiKeys api, Struct request, RequestCompletionHandler handler, long now) { RequestHeader header = this.client.nextRequestHeader(api); - RequestSend send = new RequestSend(this.consumerCoordinator.idString(), header, request); - return new ClientRequest(now, true, send, handler); - } - - /** - * Attempt to send a request and receive its response. 
- * - * @return The response - */ - private ClientResponse sendAndReceive(ClientRequest clientRequest, long now) { - - // send the request - this.client.send(clientRequest); - - // drain all responses from the destination node - List responses = this.client.completeAll(clientRequest.request().destination(), now); - if (responses.isEmpty()) { - throw new IllegalStateException("This should not happen."); - } else { - // other requests should be handled by the callback, and - // we only care about the response of the last request - return responses.get(responses.size() - 1); - } + RequestSend send = new RequestSend(node.idString(), header, request); + this.client.send(new ClientRequest(now, true, send, handler)); } private class HeartbeatCompletionHandler implements RequestCompletionHandler { @@ -518,21 +472,24 @@ public final class Coordinator { } } - private class CommitOffsetCompletionHandler implements RequestCompletionHandler { + private class OffsetCommitCompletionHandler implements RequestCompletionHandler { private final Map offsets; + private final RequestFuture future; - public CommitOffsetCompletionHandler(Map offsets) { + public OffsetCommitCompletionHandler(Map offsets, RequestFuture future) { this.offsets = offsets; + this.future = future; } @Override public void onComplete(ClientResponse resp) { if (resp.wasDisconnected()) { handleCoordinatorDisconnect(resp); + future.retryWithNewCoordinator(); } else { - OffsetCommitResponse response = new OffsetCommitResponse(resp.responseBody()); - for (Map.Entry entry : response.responseData().entrySet()) { + OffsetCommitResponse commitResponse = new OffsetCommitResponse(resp.responseBody()); + for (Map.Entry entry : commitResponse.responseData().entrySet()) { TopicPartition tp = entry.getKey(); short errorCode = entry.getValue(); long offset = this.offsets.get(tp); @@ -542,14 +499,29 @@ public final class Coordinator { } else if (errorCode == Errors.CONSUMER_COORDINATOR_NOT_AVAILABLE.code() || errorCode == Errors.NOT_COORDINATOR_FOR_CONSUMER.code()) { coordinatorDead(); - } else { + future.retryWithNewCoordinator(); + } else if (errorCode == Errors.OFFSET_METADATA_TOO_LARGE.code() + || errorCode == Errors.INVALID_COMMIT_OFFSET_SIZE.code()) { // do not need to throw the exception but just log the error log.error("Error committing partition {} at offset {}: {}", - tp, - offset, - Errors.forCode(errorCode).exception().getMessage()); + tp, + offset, + Errors.forCode(errorCode).exception().getMessage()); + } else if (errorCode == Errors.UNKNOWN_CONSUMER_ID.code() + || errorCode == Errors.ILLEGAL_GENERATION.code()) { + // need to re-join group + subscriptions.needReassignment(); + } else { + // re-throw the exception as these should not happen + log.error("Error committing partition {} at offset {}: {}", + tp, + offset, + Errors.forCode(errorCode).exception().getMessage()); } } + + if (!future.isDone()) + future.complete(null); } sensors.commitLatency.record(resp.requestLatencyMs()); } diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/Fetcher.java b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/Fetcher.java index 56281ee..695eaf6 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/Fetcher.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/Fetcher.java @@ -19,7 +19,6 @@ import org.apache.kafka.clients.KafkaClient; import org.apache.kafka.clients.Metadata; import org.apache.kafka.clients.RequestCompletionHandler; import 
org.apache.kafka.clients.consumer.ConsumerRecord; -import org.apache.kafka.clients.consumer.NoOffsetForPartitionException; import org.apache.kafka.common.Cluster; import org.apache.kafka.common.MetricName; import org.apache.kafka.common.Node; @@ -61,9 +60,6 @@ import java.util.Map; public class Fetcher { private static final Logger log = LoggerFactory.getLogger(Fetcher.class); - private static final long EARLIEST_OFFSET_TIMESTAMP = -2L; - private static final long LATEST_OFFSET_TIMESTAMP = -1L; - private final KafkaClient client; @@ -72,23 +68,19 @@ public class Fetcher { private final int maxWaitMs; private final int fetchSize; private final boolean checkCrcs; - private final long retryBackoffMs; private final Metadata metadata; private final FetchManagerMetrics sensors; private final SubscriptionState subscriptions; private final List> records; - private final AutoOffsetResetStrategy offsetResetStrategy; private final Deserializer keyDeserializer; private final Deserializer valueDeserializer; public Fetcher(KafkaClient client, - long retryBackoffMs, int minBytes, int maxWaitMs, int fetchSize, boolean checkCrcs, - String offsetReset, Deserializer keyDeserializer, Deserializer valueDeserializer, Metadata metadata, @@ -102,17 +94,16 @@ public class Fetcher { this.client = client; this.metadata = metadata; this.subscriptions = subscriptions; - this.retryBackoffMs = retryBackoffMs; this.minBytes = minBytes; this.maxWaitMs = maxWaitMs; this.fetchSize = fetchSize; this.checkCrcs = checkCrcs; - this.offsetResetStrategy = AutoOffsetResetStrategy.valueOf(offsetReset); this.keyDeserializer = keyDeserializer; this.valueDeserializer = valueDeserializer; this.records = new LinkedList>(); + this.sensors = new FetchManagerMetrics(metrics, metricGrpPrefix, metricTags); } @@ -166,84 +157,76 @@ public class Fetcher { } /** - * Reset offsets for the given partition using the offset reset strategy. - * - * @param partition The given partition that needs reset offset - * @throws org.apache.kafka.clients.consumer.NoOffsetForPartitionException If no offset reset strategy is defined - */ - public void resetOffset(TopicPartition partition) { - long timestamp; - if (this.offsetResetStrategy == AutoOffsetResetStrategy.EARLIEST) - timestamp = EARLIEST_OFFSET_TIMESTAMP; - else if (this.offsetResetStrategy == AutoOffsetResetStrategy.LATEST) - timestamp = LATEST_OFFSET_TIMESTAMP; - else - throw new NoOffsetForPartitionException("No offset is set and no reset policy is defined"); - - log.debug("Resetting offset for partition {} to {} offset.", partition, this.offsetResetStrategy.name() - .toLowerCase()); - this.subscriptions.seek(partition, offsetBefore(partition, timestamp)); - } - - /** * Fetch a single offset before the given timestamp for the partition. * * @param topicPartition The partition that needs fetching offset. * @param timestamp The timestamp for fetching offset. - * @return The offset of the message that is published before the given timestamp + * @return A response which can be polled to obtain the corresponding offset. 
*/ - public long offsetBefore(TopicPartition topicPartition, long timestamp) { - log.debug("Fetching offsets for partition {}.", topicPartition); + public RequestFuture listOffset(final TopicPartition topicPartition, long timestamp) { Map partitions = new HashMap(1); partitions.put(topicPartition, new ListOffsetRequest.PartitionData(timestamp, 1)); - while (true) { - long now = time.milliseconds(); - PartitionInfo info = metadata.fetch().partition(topicPartition); - if (info == null) { - metadata.add(topicPartition.topic()); - log.debug("Partition {} is unknown for fetching offset, wait for metadata refresh", topicPartition); - awaitMetadataUpdate(); - } else if (info.leader() == null) { - log.debug("Leader for partition {} unavailable for fetching offset, wait for metadata refresh", topicPartition); - awaitMetadataUpdate(); - } else if (this.client.ready(info.leader(), now)) { - Node node = info.leader(); - ListOffsetRequest request = new ListOffsetRequest(-1, partitions); - RequestSend send = new RequestSend(node.idString(), + long now = time.milliseconds(); + PartitionInfo info = metadata.fetch().partition(topicPartition); + if (info == null) { + metadata.add(topicPartition.topic()); + log.debug("Partition {} is unknown for fetching offset, wait for metadata refresh", topicPartition); + return RequestFuture.metadataRefreshNeeded(); + } else if (info.leader() == null) { + log.debug("Leader for partition {} unavailable for fetching offset, wait for metadata refresh", topicPartition); + return RequestFuture.metadataRefreshNeeded(); + } else if (this.client.ready(info.leader(), now)) { + final RequestFuture future = new RequestFuture(); + Node node = info.leader(); + ListOffsetRequest request = new ListOffsetRequest(-1, partitions); + RequestSend send = new RequestSend(node.idString(), this.client.nextRequestHeader(ApiKeys.LIST_OFFSETS), request.toStruct()); - ClientRequest clientRequest = new ClientRequest(now, true, send, null); - this.client.send(clientRequest); - List responses = this.client.completeAll(node.idString(), now); - if (responses.isEmpty()) - throw new IllegalStateException("This should not happen."); - ClientResponse response = responses.get(responses.size() - 1); - if (response.wasDisconnected()) { - awaitMetadataUpdate(); - } else { - ListOffsetResponse lor = new ListOffsetResponse(response.responseBody()); - short errorCode = lor.responseData().get(topicPartition).errorCode; - if (errorCode == Errors.NONE.code()) { - List offsets = lor.responseData().get(topicPartition).offsets; - if (offsets.size() != 1) - throw new IllegalStateException("This should not happen."); - long offset = offsets.get(0); - log.debug("Fetched offset {} for partition {}", offset, topicPartition); - return offset; - } else if (errorCode == Errors.NOT_LEADER_FOR_PARTITION.code() - || errorCode == Errors.UNKNOWN_TOPIC_OR_PARTITION.code()) { - log.warn("Attempt to fetch offsets for partition {} failed due to obsolete leadership information, retrying.", - topicPartition); - awaitMetadataUpdate(); - } else { - log.error("Attempt to fetch offsets for partition {} failed due to: {}", - topicPartition, Errors.forCode(errorCode).exception().getMessage()); - awaitMetadataUpdate(); - } + RequestCompletionHandler completionHandler = new RequestCompletionHandler() { + @Override + public void onComplete(ClientResponse resp) { + handleListOffsetResponse(topicPartition, resp, future); } + }; + ClientRequest clientRequest = new ClientRequest(now, true, send, completionHandler); + this.client.send(clientRequest); + 
return future; + } else { + // We initiated a connect to the leader, but we need to poll to finish it. + return RequestFuture.pollNeeded(); + } + } + + /** + * Callback for the response of the list offset call above. + * @param topicPartition The partition that was fetched + * @param clientResponse The response from the server. + */ + private void handleListOffsetResponse(TopicPartition topicPartition, + ClientResponse clientResponse, + RequestFuture future) { + if (clientResponse.wasDisconnected()) { + future.retryAfterMetadataRefresh(); + } else { + ListOffsetResponse lor = new ListOffsetResponse(clientResponse.responseBody()); + short errorCode = lor.responseData().get(topicPartition).errorCode; + if (errorCode == Errors.NONE.code()) { + List offsets = lor.responseData().get(topicPartition).offsets; + if (offsets.size() != 1) + throw new IllegalStateException("This should not happen."); + long offset = offsets.get(0); + log.debug("Fetched offset {} for partition {}", offset, topicPartition); + + future.complete(offset); + } else if (errorCode == Errors.NOT_LEADER_FOR_PARTITION.code() + || errorCode == Errors.UNKNOWN_TOPIC_OR_PARTITION.code()) { + log.warn("Attempt to fetch offsets for partition {} failed due to obsolete leadership information, retrying.", + topicPartition); + future.retryAfterMetadataRefresh(); } else { - log.debug("Leader for partition {} is not ready, retry fetching offsets", topicPartition); - client.poll(this.retryBackoffMs, now); + log.error("Attempt to fetch offsets for partition {} failed due to: {}", + topicPartition, Errors.forCode(errorCode).exception().getMessage()); + future.retryAfterMetadataRefresh(); } } } @@ -257,8 +240,10 @@ public class Fetcher { Map> fetchable = new HashMap>(); for (TopicPartition partition : subscriptions.assignedPartitions()) { Node node = cluster.leaderFor(partition); - // if there is a leader and no in-flight requests, issue a new fetch - if (node != null && this.client.inFlightRequestCount(node.idString()) == 0) { + if (node == null) { + metadata.requestUpdate(); + } else if (this.client.inFlightRequestCount(node.idString()) == 0) { + // if there is a leader and no in-flight requests, issue a new fetch Map fetch = fetchable.get(node.id()); if (fetch == null) { fetch = new HashMap(); @@ -327,7 +312,7 @@ public class Fetcher { } else if (partition.errorCode == Errors.OFFSET_OUT_OF_RANGE.code()) { // TODO: this could be optimized by grouping all out-of-range partitions log.info("Fetch offset {} is out of range, resetting offset", subscriptions.fetched(tp)); - resetOffset(tp); + subscriptions.needOffsetReset(tp); } else if (partition.errorCode == Errors.UNKNOWN.code()) { log.warn("Unknown error fetching data for topic-partition {}", tp); } else { @@ -356,17 +341,6 @@ public class Fetcher { return new ConsumerRecord(partition.topic(), partition.partition(), offset, key, value); } - /* - * Request a metadata update and wait until it has occurred - */ - private void awaitMetadataUpdate() { - int version = this.metadata.requestUpdate(); - do { - long now = time.milliseconds(); - this.client.poll(this.retryBackoffMs, now); - } while (this.metadata.version() == version); - } - private static class PartitionRecords { public long fetchOffset; public TopicPartition partition; @@ -379,9 +353,6 @@ public class Fetcher { } } - private static enum AutoOffsetResetStrategy { - LATEST, EARLIEST, NONE - } private class FetchManagerMetrics { public final Metrics metrics; diff --git 
a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/Heartbeat.java b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/Heartbeat.java index e7cfaaa..51eae19 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/Heartbeat.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/Heartbeat.java @@ -42,4 +42,14 @@ public final class Heartbeat { public long lastHeartbeatSend() { return this.lastHeartbeatSend; } + + public long timeToNextHeartbeat(long now) { + long timeSinceLastHeartbeat = now - lastHeartbeatSend; + + long hbInterval = timeout / HEARTBEATS_PER_SESSION_INTERVAL; + if (timeSinceLastHeartbeat > hbInterval) + return 0; + else + return hbInterval - timeSinceLastHeartbeat; + } } \ No newline at end of file diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/RequestFuture.java b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/RequestFuture.java new file mode 100644 index 0000000..13fc9af --- /dev/null +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/RequestFuture.java @@ -0,0 +1,209 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE + * file distributed with this work for additional information regarding copyright ownership. The ASF licenses this file + * to You under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the + * License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on + * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + */ +package org.apache.kafka.clients.consumer.internals; + +/** + * Result of an asynchronous request through {@link org.apache.kafka.clients.KafkaClient}. To get the + * result of the request, you must use poll using {@link org.apache.kafka.clients.KafkaClient#poll(long, long)} + * until {@link #isDone()} returns true. Typical usage might look like this: + * + *

+ * <pre>
+ *     RequestFuture future = sendRequest();
    + *     while (!future.isDone()) {
    + *         client.poll(timeout, now);
    + *     }
    + *
    + *     switch (future.outcome()) {
    + *     case SUCCESS:
    + *         // handle request success
    + *         break;
    + *     case NEED_RETRY:
    + *         // retry after taking possible retry action
    + *         break;
    + *     case EXCEPTION:
    + *         // handle exception
+ *     }
+ * </pre>
+ *
+ * When {@link #isDone()} returns true, there are three possible outcomes (obtained through {@link #outcome()}):
+ *
+ * <ol>
+ * <li> {@link org.apache.kafka.clients.consumer.internals.RequestFuture.Outcome#SUCCESS}: If the request was
+ * successful, then you can use {@link #value()} to obtain the result.</li>
+ * <li> {@link org.apache.kafka.clients.consumer.internals.RequestFuture.Outcome#EXCEPTION}: If an unhandled exception
+ * was encountered, you can use {@link #exception()} to get it.</li>
+ * <li> {@link org.apache.kafka.clients.consumer.internals.RequestFuture.Outcome#NEED_RETRY}: The request may
+ * not have been successful, but the failure may be ephemeral and the caller just needs to try the request again.
+ * In this case, use {@link #retryAction()} to determine what action should be taken (if any) before
+ * retrying.</li>
+ * </ol>
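+ *
+ * For example, a caller might react to the retry hint roughly as follows (a sketch only; the
+ * surrounding client, timeout and now variables are assumed, as in the usage example above):
+ * <pre>
+ *     if (future.failed() && !future.hasException()) {
+ *         switch (future.retryAction()) {
+ *         case POLL:
+ *             client.poll(timeout, now); // e.g. finish an in-progress connection, then retry
+ *             break;
+ *         case FIND_COORDINATOR:
+ *             // rediscover the coordinator before retrying
+ *             break;
+ *         case REFRESH_METADATA:
+ *             // request a metadata update before retrying
+ *             break;
+ *         default:
+ *             break; // NOOP or BACKOFF: retry immediately or after a delay
+ *         }
+ *     }
+ * </pre>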
    + * + * @param Return type of the result (Can be Void if there is no response) + */ +public class RequestFuture { + public static final RequestFuture NEED_NEW_COORDINATOR = newRetryFuture(RetryAction.FIND_COORDINATOR); + public static final RequestFuture NEED_POLL = newRetryFuture(RetryAction.POLL); + public static final RequestFuture NEED_METADATA_REFRESH = newRetryFuture(RetryAction.REFRESH_METADATA); + + public enum RetryAction { + NOOP, // Retry immediately. + POLL, // Retry after calling poll (e.g. to finish a connection) + BACKOFF, // Retry after a delay + FIND_COORDINATOR, // Find a new coordinator before retrying + REFRESH_METADATA // Refresh metadata before retrying + } + + public enum Outcome { + SUCCESS, + NEED_RETRY, + EXCEPTION + } + + private Outcome outcome; + private RetryAction retryAction; + private T value; + private RuntimeException exception; + + /** + * Check whether the response is ready to be handled + * @return true if the response is ready, false otherwise + */ + public boolean isDone() { + return outcome != null; + } + + /** + * Get the value corresponding to this request (if it has one, as indicated by {@link #outcome()}). + * @return the value if it exists or null + */ + public T value() { + return value; + } + + /** + * Check if the request succeeded; + * @return true if a value is available, false otherwise + */ + public boolean succeeded() { + return outcome == Outcome.SUCCESS; + } + + /** + * Check if the request completed failed. + * @return true if the request failed (whether or not it can be retried) + */ + public boolean failed() { + return outcome != Outcome.SUCCESS; + } + + /** + * Return the error from this response (assuming {@link #succeeded()} has returned false. If the + * response is not ready or if there is no retryAction, null is returned. + * @return the error if it exists or null + */ + public RetryAction retryAction() { + return retryAction; + } + + /** + * Get the exception from a failed result. You should check that there is an exception + * with {@link #hasException()} before using this method. + * @return The exception if it exists or null + */ + public RuntimeException exception() { + return exception; + } + + /** + * Check whether there was an exception. + * @return true if this request failed with an exception + */ + public boolean hasException() { + return outcome == Outcome.EXCEPTION; + } + + /** + * Check the outcome of the future if it is ready. + * @return the outcome or null if the future is not finished + */ + public Outcome outcome() { + return outcome; + } + + /** + * The request failed, but should be retried using the provided retry action. + * @param retryAction The action that should be taken by the caller before retrying the request + */ + public void retry(RetryAction retryAction) { + this.outcome = Outcome.NEED_RETRY; + this.retryAction = retryAction; + } + + public void retryNow() { + retry(RetryAction.NOOP); + } + + public void retryAfterBackoff() { + retry(RetryAction.BACKOFF); + } + + public void retryWithNewCoordinator() { + retry(RetryAction.FIND_COORDINATOR); + } + + public void retryAfterMetadataRefresh() { + retry(RetryAction.REFRESH_METADATA); + } + + /** + * Complete the request successfully. After this call, {@link #succeeded()} will return true + * and the value can be obtained through {@link #value()}. + * @param value corresponding value (or null if there is none) + */ + public void complete(T value) { + this.outcome = Outcome.SUCCESS; + this.value = value; + } + + /** + * Raise an exception. 
The request will be marked as failed, and the caller can either + * handle the exception or throw it. + * @param e The exception that + */ + public void raise(RuntimeException e) { + this.outcome = Outcome.EXCEPTION; + this.exception = e; + } + + private static RequestFuture newRetryFuture(RetryAction retryAction) { + RequestFuture result = new RequestFuture(); + result.retry(retryAction); + return result; + } + + @SuppressWarnings("unchecked") + public static RequestFuture pollNeeded() { + return (RequestFuture) NEED_POLL; + } + + @SuppressWarnings("unchecked") + public static RequestFuture metadataRefreshNeeded() { + return (RequestFuture) NEED_METADATA_REFRESH; + } + + @SuppressWarnings("unchecked") + public static RequestFuture newCoordinatorNeeded() { + return (RequestFuture) NEED_NEW_COORDINATOR; + } + +} diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/SubscriptionState.java b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/SubscriptionState.java index cee7541..6837453 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/SubscriptionState.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/SubscriptionState.java @@ -12,14 +12,15 @@ */ package org.apache.kafka.clients.consumer.internals; +import org.apache.kafka.clients.consumer.OffsetResetStrategy; +import org.apache.kafka.common.TopicPartition; + import java.util.HashMap; import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Set; -import org.apache.kafka.common.TopicPartition; - /** * A class for tracking the topics, partitions, and offsets for the consumer */ @@ -49,7 +50,14 @@ public class SubscriptionState { /* do we need to request the latest committed offsets from the coordinator? 
*/ private boolean needsFetchCommittedOffsets; - public SubscriptionState() { + /* Partitions that need to be reset before fetching */ + private Map resetPartitions; + + /* Default offset reset strategy */ + private OffsetResetStrategy offsetResetStrategy; + + public SubscriptionState(OffsetResetStrategy offsetResetStrategy) { + this.offsetResetStrategy = offsetResetStrategy; this.subscribedTopics = new HashSet(); this.subscribedPartitions = new HashSet(); this.assignedPartitions = new HashSet(); @@ -58,6 +66,7 @@ public class SubscriptionState { this.committed = new HashMap(); this.needsPartitionAssignment = false; this.needsFetchCommittedOffsets = true; // initialize to true for the consumers to fetch offset upon starting up + this.resetPartitions = new HashMap(); } public void subscribe(String topic) { @@ -102,12 +111,14 @@ public class SubscriptionState { this.committed.remove(tp); this.fetched.remove(tp); this.consumed.remove(tp); + this.resetPartitions.remove(tp); } public void clearAssignment() { this.assignedPartitions.clear(); this.committed.clear(); this.fetched.clear(); + this.consumed.clear(); this.needsPartitionAssignment = !subscribedTopics().isEmpty(); } @@ -145,6 +156,7 @@ public class SubscriptionState { public void seek(TopicPartition tp, long offset) { fetched(tp, offset); consumed(tp, offset); + resetPartitions.remove(tp); } public Set assignedPartitions() { @@ -169,6 +181,28 @@ public class SubscriptionState { return this.consumed; } + public void needOffsetReset(TopicPartition partition, OffsetResetStrategy offsetResetStrategy) { + this.resetPartitions.put(partition, offsetResetStrategy); + this.fetched.remove(partition); + this.consumed.remove(partition); + } + + public void needOffsetReset(TopicPartition partition) { + needOffsetReset(partition, offsetResetStrategy); + } + + public boolean isOffsetResetNeeded(TopicPartition partition) { + return resetPartitions.containsKey(partition); + } + + public boolean isOffsetResetNeeded() { + return !resetPartitions.isEmpty(); + } + + public OffsetResetStrategy resetStrategy(TopicPartition partition) { + return resetPartitions.get(partition); + } + public boolean hasAllFetchPositions() { return this.fetched.size() >= this.assignedPartitions.size(); } @@ -192,4 +226,5 @@ public class SubscriptionState { this.needsPartitionAssignment = false; } + } \ No newline at end of file diff --git a/clients/src/main/java/org/apache/kafka/clients/producer/KafkaProducer.java b/clients/src/main/java/org/apache/kafka/clients/producer/KafkaProducer.java index fc3fe26..c4621e2 100644 --- a/clients/src/main/java/org/apache/kafka/clients/producer/KafkaProducer.java +++ b/clients/src/main/java/org/apache/kafka/clients/producer/KafkaProducer.java @@ -404,6 +404,10 @@ public class KafkaProducer implements Producer { } catch (InterruptedException e) { this.errors.record(); throw new InterruptException(e); + } catch (BufferExhaustedException e) { + this.errors.record(); + this.metrics.sensor("buffer-exhausted-records").record(); + throw e; } catch (KafkaException e) { this.errors.record(); throw e; @@ -523,7 +527,8 @@ public class KafkaProducer implements Producer { * If close() is called from {@link Callback}, a warning message will be logged and close(0, TimeUnit.MILLISECONDS) * will be called instead. We do this because the sender thread would otherwise try to join itself and * block forever. - *

    + *

    + * * @throws InterruptException If the thread is interrupted while blocked */ @Override diff --git a/clients/src/main/java/org/apache/kafka/clients/producer/internals/ErrorLoggingCallback.java b/clients/src/main/java/org/apache/kafka/clients/producer/internals/ErrorLoggingCallback.java index 678d1c6..747e29f 100644 --- a/clients/src/main/java/org/apache/kafka/clients/producer/internals/ErrorLoggingCallback.java +++ b/clients/src/main/java/org/apache/kafka/clients/producer/internals/ErrorLoggingCallback.java @@ -23,12 +23,18 @@ public class ErrorLoggingCallback implements Callback { private String topic; private byte[] key; private byte[] value; + private int valueLength; private boolean logAsString; public ErrorLoggingCallback(String topic, byte[] key, byte[] value, boolean logAsString) { this.topic = topic; this.key = key; - this.value = value; + + if (logAsString) { + this.value = value; + } + + this.valueLength = value == null ? -1 : value.length; this.logAsString = logAsString; } @@ -36,10 +42,10 @@ public class ErrorLoggingCallback implements Callback { if (e != null) { String keyString = (key == null) ? "null" : logAsString ? new String(key) : key.length + " bytes"; - String valueString = (value == null) ? "null" : - logAsString ? new String(value) : value.length + " bytes"; + String valueString = (valueLength == -1) ? "null" : + logAsString ? new String(value) : valueLength + " bytes"; log.error("Error when sending message to topic {} with key: {}, value: {} with error: {}", - topic, keyString, valueString, e.getMessage()); + topic, keyString, valueString, e.getMessage()); } } } diff --git a/clients/src/main/java/org/apache/kafka/clients/producer/internals/RecordAccumulator.java b/clients/src/main/java/org/apache/kafka/clients/producer/internals/RecordAccumulator.java index 87dbd64..a152bd7 100644 --- a/clients/src/main/java/org/apache/kafka/clients/producer/internals/RecordAccumulator.java +++ b/clients/src/main/java/org/apache/kafka/clients/producer/internals/RecordAccumulator.java @@ -21,6 +21,8 @@ import org.apache.kafka.common.TopicPartition; import org.apache.kafka.common.metrics.Measurable; import org.apache.kafka.common.metrics.MetricConfig; import org.apache.kafka.common.metrics.Metrics; +import org.apache.kafka.common.metrics.Sensor; +import org.apache.kafka.common.metrics.stats.Rate; import org.apache.kafka.common.record.CompressionType; import org.apache.kafka.common.record.MemoryRecords; import org.apache.kafka.common.record.Record; @@ -112,7 +114,6 @@ public final class RecordAccumulator { } private void registerMetrics(Metrics metrics, String metricGrpName, Map metricTags) { - MetricName metricName = new MetricName("waiting-threads", metricGrpName, "The number of user threads blocked waiting for buffer memory to enqueue their records", metricTags); Measurable waitingThreads = new Measurable() { public double measure(MetricConfig config, long now) { @@ -120,7 +121,7 @@ public final class RecordAccumulator { } }; metrics.addMetric(metricName, waitingThreads); - + metricName = new MetricName("buffer-total-bytes", metricGrpName, "The maximum amount of buffer memory the client can use (whether or not it is currently used).", metricTags); Measurable totalBytes = new Measurable() { public double measure(MetricConfig config, long now) { @@ -128,6 +129,7 @@ public final class RecordAccumulator { } }; metrics.addMetric(metricName, totalBytes); + metricName = new MetricName("buffer-available-bytes", metricGrpName, "The total amount of buffer memory that is not being used 
(either unallocated or in the free list).", metricTags); Measurable availableBytes = new Measurable() { public double measure(MetricConfig config, long now) { @@ -135,6 +137,10 @@ public final class RecordAccumulator { } }; metrics.addMetric(metricName, availableBytes); + + Sensor bufferExhaustedRecordSensor = metrics.sensor("buffer-exhausted-records"); + metricName = new MetricName("buffer-exhausted-rate", metricGrpName, "The average per-second number of record sends that are dropped due to buffer exhaustion", metricTags); + bufferExhaustedRecordSensor.add(metricName, new Rate()); } /** diff --git a/clients/src/main/java/org/apache/kafka/common/network/Selector.java b/clients/src/main/java/org/apache/kafka/common/network/Selector.java index 0bd2e1e..ba132ed 100644 --- a/clients/src/main/java/org/apache/kafka/common/network/Selector.java +++ b/clients/src/main/java/org/apache/kafka/common/network/Selector.java @@ -284,6 +284,7 @@ public class Selector implements Selectable { } catch (InvalidReceiveException e) { log.error("Invalid data received from " + channel.id() + " closing connection", e); close(channel.id()); + this.disconnected.add(channel.id()); throw e; } } diff --git a/clients/src/main/java/org/apache/kafka/common/protocol/Errors.java b/clients/src/main/java/org/apache/kafka/common/protocol/Errors.java index 5b898c8..4c0ecc3 100644 --- a/clients/src/main/java/org/apache/kafka/common/protocol/Errors.java +++ b/clients/src/main/java/org/apache/kafka/common/protocol/Errors.java @@ -77,7 +77,11 @@ public enum Errors { UNKNOWN_CONSUMER_ID(25, new ApiException("The coordinator is not aware of this consumer.")), INVALID_SESSION_TIMEOUT(26, - new ApiException("The session timeout is not within an acceptable range.")); + new ApiException("The session timeout is not within an acceptable range.")), + COMMITTING_PARTITIONS_NOT_ASSIGNED(27, + new ApiException("Some of the committing partitions are not assigned the committer")), + INVALID_COMMIT_OFFSET_SIZE(28, + new ApiException("The committing offset data size is not valid")); private static Map, Errors> classToError = new HashMap, Errors>(); private static Map codeToError = new HashMap(); diff --git a/clients/src/main/java/org/apache/kafka/common/record/MemoryRecords.java b/clients/src/main/java/org/apache/kafka/common/record/MemoryRecords.java index b2db240..5f1b45c 100644 --- a/clients/src/main/java/org/apache/kafka/common/record/MemoryRecords.java +++ b/clients/src/main/java/org/apache/kafka/common/record/MemoryRecords.java @@ -115,9 +115,11 @@ public class MemoryRecords implements Records { * Close this batch for no more appends */ public void close() { - compressor.close(); - writable = false; - buffer = compressor.buffer(); + if (writable) { + compressor.close(); + writable = false; + buffer = compressor.buffer(); + } } /** Write the records in this set to the given channel */ diff --git a/clients/src/main/java/org/apache/kafka/common/requests/OffsetCommitResponse.java b/clients/src/main/java/org/apache/kafka/common/requests/OffsetCommitResponse.java index 70844d6..a163333 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/OffsetCommitResponse.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/OffsetCommitResponse.java @@ -41,7 +41,13 @@ public class OffsetCommitResponse extends AbstractRequestResponse { /** * Possible error code: * - * TODO + * OFFSET_METADATA_TOO_LARGE (12) + * CONSUMER_COORDINATOR_NOT_AVAILABLE (15) + * NOT_COORDINATOR_FOR_CONSUMER (16) + * ILLEGAL_GENERATION (22) + * 
UNKNOWN_CONSUMER_ID (25) + * COMMITTING_PARTITIONS_NOT_ASSIGNED (27) + * INVALID_COMMIT_OFFSET_SIZE (28) */ private final Map responseData; diff --git a/clients/src/main/java/org/apache/kafka/common/requests/OffsetFetchRequest.java b/clients/src/main/java/org/apache/kafka/common/requests/OffsetFetchRequest.java index b5e8a0f..6ee7597 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/OffsetFetchRequest.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/OffsetFetchRequest.java @@ -42,9 +42,6 @@ public class OffsetFetchRequest extends AbstractRequest { // partition level field names private static final String PARTITION_KEY_NAME = "partition"; - public static final int DEFAULT_GENERATION_ID = -1; - public static final String DEFAULT_CONSUMER_ID = ""; - private final String groupId; private final List partitions; diff --git a/clients/src/main/java/org/apache/kafka/common/requests/OffsetFetchResponse.java b/clients/src/main/java/org/apache/kafka/common/requests/OffsetFetchResponse.java index 512a0ef..3dc8521 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/OffsetFetchResponse.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/OffsetFetchResponse.java @@ -47,10 +47,11 @@ public class OffsetFetchResponse extends AbstractRequestResponse { /** * Possible error code: * - * UNKNOWN_TOPIC_OR_PARTITION (3) + * UNKNOWN_TOPIC_OR_PARTITION (3) <- only for request v0 * OFFSET_LOAD_IN_PROGRESS (14) * NOT_COORDINATOR_FOR_CONSUMER (16) - * NO_OFFSETS_FETCHABLE (23) + * ILLEGAL_GENERATION (22) + * UNKNOWN_CONSUMER_ID (25) */ private final Map responseData; diff --git a/clients/src/main/java/org/apache/kafka/common/utils/Utils.java b/clients/src/main/java/org/apache/kafka/common/utils/Utils.java index 5b21eac..95ba97a 100755 --- a/clients/src/main/java/org/apache/kafka/common/utils/Utils.java +++ b/clients/src/main/java/org/apache/kafka/common/utils/Utils.java @@ -182,6 +182,21 @@ public class Utils { } /** + * Get the minimum of some long values. + * @param first Used to ensure at least one value + * @param rest The rest of longs to compare + * @return The minimum of all passed argument. + */ + public static long min(long first, long ... 
rest) { + long min = first; + for (int i = 0; i < rest.length; i++) { + if (rest[i] < min) + min = rest[i]; + } + return min; + } + + /** * Get the length for UTF8-encoding a string without encoding it first * * @param s The string to calculate the length for diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/MockConsumerTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/MockConsumerTest.java index 677edd3..26b6b40 100644 --- a/clients/src/test/java/org/apache/kafka/clients/consumer/MockConsumerTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/consumer/MockConsumerTest.java @@ -25,7 +25,7 @@ import org.junit.Test; public class MockConsumerTest { - private MockConsumer consumer = new MockConsumer(); + private MockConsumer consumer = new MockConsumer(OffsetResetStrategy.EARLIEST); @Test public void testSimpleMock() { diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/CoordinatorTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/CoordinatorTest.java index 1454ab7..d085fe5 100644 --- a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/CoordinatorTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/CoordinatorTest.java @@ -17,10 +17,11 @@ package org.apache.kafka.clients.consumer.internals; import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertTrue; -import org.apache.kafka.clients.Metadata; import org.apache.kafka.clients.MockClient; +import org.apache.kafka.clients.consumer.OffsetResetStrategy; import org.apache.kafka.common.Cluster; import org.apache.kafka.common.Node; import org.apache.kafka.common.TopicPartition; @@ -49,24 +50,20 @@ public class CoordinatorTest { private String topicName = "test"; private String groupId = "test-group"; private TopicPartition tp = new TopicPartition(topicName, 0); - private long retryBackoffMs = 0L; private int sessionTimeoutMs = 10; private String rebalanceStrategy = "not-matter"; private MockTime time = new MockTime(); private MockClient client = new MockClient(time); - private Metadata metadata = new Metadata(0, Long.MAX_VALUE); private Cluster cluster = TestUtils.singletonCluster(topicName, 1); private Node node = cluster.nodes().get(0); - private SubscriptionState subscriptions = new SubscriptionState(); + private SubscriptionState subscriptions = new SubscriptionState(OffsetResetStrategy.EARLIEST); private Metrics metrics = new Metrics(time); private Map metricTags = new LinkedHashMap(); private Coordinator coordinator = new Coordinator(client, groupId, - retryBackoffMs, sessionTimeoutMs, rebalanceStrategy, - metadata, subscriptions, metrics, "consumer" + groupId, @@ -75,13 +72,14 @@ public class CoordinatorTest { @Before public void setup() { - metadata.update(cluster, time.milliseconds()); client.setNode(node); } @Test public void testNormalHeartbeat() { client.prepareResponse(consumerMetadataResponse(node, Errors.NONE.code())); + coordinator.discoverConsumerCoordinator(); + client.poll(0, time.milliseconds()); // normal heartbeat time.sleep(sessionTimeoutMs); @@ -94,6 +92,8 @@ public class CoordinatorTest { @Test public void testCoordinatorNotAvailable() { client.prepareResponse(consumerMetadataResponse(node, Errors.NONE.code())); + coordinator.discoverConsumerCoordinator(); + client.poll(0, time.milliseconds()); // consumer_coordinator_not_available will mark coordinator as unknown time.sleep(sessionTimeoutMs); @@ -108,6 +108,8 @@ 
public class CoordinatorTest { @Test public void testNotCoordinator() { client.prepareResponse(consumerMetadataResponse(node, Errors.NONE.code())); + coordinator.discoverConsumerCoordinator(); + client.poll(0, time.milliseconds()); // not_coordinator will mark coordinator as unknown time.sleep(sessionTimeoutMs); @@ -122,6 +124,8 @@ public class CoordinatorTest { @Test public void testIllegalGeneration() { client.prepareResponse(consumerMetadataResponse(node, Errors.NONE.code())); + coordinator.discoverConsumerCoordinator(); + client.poll(0, time.milliseconds()); // illegal_generation will cause re-partition subscriptions.subscribe(topicName); @@ -139,6 +143,8 @@ public class CoordinatorTest { @Test public void testCoordinatorDisconnect() { client.prepareResponse(consumerMetadataResponse(node, Errors.NONE.code())); + coordinator.discoverConsumerCoordinator(); + client.poll(0, time.milliseconds()); // coordinator disconnect will mark coordinator as unknown time.sleep(sessionTimeoutMs); @@ -152,39 +158,67 @@ public class CoordinatorTest { @Test public void testNormalJoinGroup() { + subscriptions.subscribe(topicName); + subscriptions.needReassignment(); + client.prepareResponse(consumerMetadataResponse(node, Errors.NONE.code())); + coordinator.discoverConsumerCoordinator(); + client.poll(0, time.milliseconds()); // normal join group client.prepareResponse(joinGroupResponse(1, "consumer", Collections.singletonList(tp), Errors.NONE.code())); - assertEquals(Collections.singletonList(tp), - coordinator.assignPartitions(Collections.singletonList(topicName), time.milliseconds())); - assertEquals(0, client.inFlightRequestCount()); + coordinator.assignPartitions(time.milliseconds()); + client.poll(0, time.milliseconds()); + + assertFalse(subscriptions.partitionAssignmentNeeded()); + assertEquals(Collections.singleton(tp), subscriptions.assignedPartitions()); } @Test public void testReJoinGroup() { + subscriptions.subscribe(topicName); + subscriptions.needReassignment(); + client.prepareResponse(consumerMetadataResponse(node, Errors.NONE.code())); + coordinator.discoverConsumerCoordinator(); + client.poll(0, time.milliseconds()); + assertTrue(subscriptions.partitionAssignmentNeeded()); // diconnected from original coordinator will cause re-discover and join again client.prepareResponse(joinGroupResponse(1, "consumer", Collections.singletonList(tp), Errors.NONE.code()), true); + coordinator.assignPartitions(time.milliseconds()); + client.poll(0, time.milliseconds()); + assertTrue(subscriptions.partitionAssignmentNeeded()); + + // rediscover the coordinator client.prepareResponse(consumerMetadataResponse(node, Errors.NONE.code())); + coordinator.discoverConsumerCoordinator(); + client.poll(0, time.milliseconds()); + + // try assigning partitions again client.prepareResponse(joinGroupResponse(1, "consumer", Collections.singletonList(tp), Errors.NONE.code())); - assertEquals(Collections.singletonList(tp), - coordinator.assignPartitions(Collections.singletonList(topicName), time.milliseconds())); - assertEquals(0, client.inFlightRequestCount()); + coordinator.assignPartitions(time.milliseconds()); + client.poll(0, time.milliseconds()); + assertFalse(subscriptions.partitionAssignmentNeeded()); + assertEquals(Collections.singleton(tp), subscriptions.assignedPartitions()); } @Test public void testCommitOffsetNormal() { client.prepareResponse(consumerMetadataResponse(node, Errors.NONE.code())); + coordinator.discoverConsumerCoordinator(); + client.poll(0, time.milliseconds()); - // sync commit + // With success 
flag client.prepareResponse(offsetCommitResponse(Collections.singletonMap(tp, Errors.NONE.code()))); - coordinator.commitOffsets(Collections.singletonMap(tp, 100L), true, time.milliseconds()); + RequestFuture result = coordinator.commitOffsets(Collections.singletonMap(tp, 100L), time.milliseconds()); + assertEquals(1, client.poll(0, time.milliseconds()).size()); + assertTrue(result.isDone()); + assertTrue(result.succeeded()); - // async commit - coordinator.commitOffsets(Collections.singletonMap(tp, 100L), false, time.milliseconds()); + // Without success flag + coordinator.commitOffsets(Collections.singletonMap(tp, 100L), time.milliseconds()); client.respond(offsetCommitResponse(Collections.singletonMap(tp, Errors.NONE.code()))); assertEquals(1, client.poll(0, time.milliseconds()).size()); } @@ -192,34 +226,55 @@ public class CoordinatorTest { @Test public void testCommitOffsetError() { client.prepareResponse(consumerMetadataResponse(node, Errors.NONE.code())); + coordinator.discoverConsumerCoordinator(); + client.poll(0, time.milliseconds()); // async commit with coordinator not available client.prepareResponse(offsetCommitResponse(Collections.singletonMap(tp, Errors.CONSUMER_COORDINATOR_NOT_AVAILABLE.code()))); - coordinator.commitOffsets(Collections.singletonMap(tp, 100L), false, time.milliseconds()); + coordinator.commitOffsets(Collections.singletonMap(tp, 100L), time.milliseconds()); assertEquals(1, client.poll(0, time.milliseconds()).size()); assertTrue(coordinator.coordinatorUnknown()); // resume client.prepareResponse(consumerMetadataResponse(node, Errors.NONE.code())); + coordinator.discoverConsumerCoordinator(); + client.poll(0, time.milliseconds()); // async commit with not coordinator client.prepareResponse(offsetCommitResponse(Collections.singletonMap(tp, Errors.NOT_COORDINATOR_FOR_CONSUMER.code()))); - coordinator.commitOffsets(Collections.singletonMap(tp, 100L), false, time.milliseconds()); + coordinator.commitOffsets(Collections.singletonMap(tp, 100L), time.milliseconds()); assertEquals(1, client.poll(0, time.milliseconds()).size()); assertTrue(coordinator.coordinatorUnknown()); // resume client.prepareResponse(consumerMetadataResponse(node, Errors.NONE.code())); + coordinator.discoverConsumerCoordinator(); + client.poll(0, time.milliseconds()); // sync commit with not_coordinator client.prepareResponse(offsetCommitResponse(Collections.singletonMap(tp, Errors.NOT_COORDINATOR_FOR_CONSUMER.code()))); client.prepareResponse(consumerMetadataResponse(node, Errors.NONE.code())); client.prepareResponse(offsetCommitResponse(Collections.singletonMap(tp, Errors.NONE.code()))); - coordinator.commitOffsets(Collections.singletonMap(tp, 100L), true, time.milliseconds()); + RequestFuture result = coordinator.commitOffsets(Collections.singletonMap(tp, 100L), time.milliseconds()); + assertEquals(1, client.poll(0, time.milliseconds()).size()); + assertTrue(result.isDone()); + assertEquals(RequestFuture.RetryAction.FIND_COORDINATOR, result.retryAction()); // sync commit with coordinator disconnected client.prepareResponse(offsetCommitResponse(Collections.singletonMap(tp, Errors.NONE.code())), true); - client.prepareResponse(consumerMetadataResponse(node, Errors.NONE.code())); client.prepareResponse(offsetCommitResponse(Collections.singletonMap(tp, Errors.NONE.code()))); - coordinator.commitOffsets(Collections.singletonMap(tp, 100L), true, time.milliseconds()); + result = coordinator.commitOffsets(Collections.singletonMap(tp, 100L), time.milliseconds()); + + assertEquals(0, client.poll(0, 
time.milliseconds()).size()); + assertTrue(result.isDone()); + assertEquals(RequestFuture.RetryAction.FIND_COORDINATOR, result.retryAction()); + + client.prepareResponse(consumerMetadataResponse(node, Errors.NONE.code())); + coordinator.discoverConsumerCoordinator(); + client.poll(0, time.milliseconds()); + + result = coordinator.commitOffsets(Collections.singletonMap(tp, 100L), time.milliseconds()); + assertEquals(1, client.poll(0, time.milliseconds()).size()); + assertTrue(result.isDone()); + assertTrue(result.succeeded()); } @@ -227,33 +282,63 @@ public class CoordinatorTest { public void testFetchOffset() { client.prepareResponse(consumerMetadataResponse(node, Errors.NONE.code())); + coordinator.discoverConsumerCoordinator(); + client.poll(0, time.milliseconds()); // normal fetch client.prepareResponse(offsetFetchResponse(tp, Errors.NONE.code(), "", 100L)); - assertEquals(100L, (long) coordinator.fetchOffsets(Collections.singleton(tp), time.milliseconds()).get(tp)); + RequestFuture> result = coordinator.fetchOffsets(Collections.singleton(tp), time.milliseconds()); + client.poll(0, time.milliseconds()); + assertTrue(result.isDone()); + assertEquals(100L, (long) result.value().get(tp)); // fetch with loading in progress client.prepareResponse(offsetFetchResponse(tp, Errors.OFFSET_LOAD_IN_PROGRESS.code(), "", 100L)); client.prepareResponse(offsetFetchResponse(tp, Errors.NONE.code(), "", 100L)); - assertEquals(100L, (long) coordinator.fetchOffsets(Collections.singleton(tp), time.milliseconds()).get(tp)); + + result = coordinator.fetchOffsets(Collections.singleton(tp), time.milliseconds()); + client.poll(0, time.milliseconds()); + assertTrue(result.isDone()); + assertTrue(result.failed()); + assertEquals(RequestFuture.RetryAction.BACKOFF, result.retryAction()); + + result = coordinator.fetchOffsets(Collections.singleton(tp), time.milliseconds()); + client.poll(0, time.milliseconds()); + assertTrue(result.isDone()); + assertEquals(100L, (long) result.value().get(tp)); // fetch with not coordinator client.prepareResponse(offsetFetchResponse(tp, Errors.NOT_COORDINATOR_FOR_CONSUMER.code(), "", 100L)); client.prepareResponse(consumerMetadataResponse(node, Errors.NONE.code())); client.prepareResponse(offsetFetchResponse(tp, Errors.NONE.code(), "", 100L)); - assertEquals(100L, (long) coordinator.fetchOffsets(Collections.singleton(tp), time.milliseconds()).get(tp)); + + result = coordinator.fetchOffsets(Collections.singleton(tp), time.milliseconds()); + client.poll(0, time.milliseconds()); + assertTrue(result.isDone()); + assertTrue(result.failed()); + assertEquals(RequestFuture.RetryAction.FIND_COORDINATOR, result.retryAction()); + + coordinator.discoverConsumerCoordinator(); + client.poll(0, time.milliseconds()); + + result = coordinator.fetchOffsets(Collections.singleton(tp), time.milliseconds()); + client.poll(0, time.milliseconds()); + assertTrue(result.isDone()); + assertEquals(100L, (long) result.value().get(tp)); // fetch with no fetchable offsets client.prepareResponse(offsetFetchResponse(tp, Errors.NONE.code(), "", -1L)); - assertEquals(0, coordinator.fetchOffsets(Collections.singleton(tp), time.milliseconds()).size()); - - // fetch with offset topic unknown - client.prepareResponse(offsetFetchResponse(tp, Errors.UNKNOWN_TOPIC_OR_PARTITION.code(), "", 100L)); - assertEquals(0, coordinator.fetchOffsets(Collections.singleton(tp), time.milliseconds()).size()); + result = coordinator.fetchOffsets(Collections.singleton(tp), time.milliseconds()); + client.poll(0, time.milliseconds()); + 
assertTrue(result.isDone()); + assertTrue(result.value().isEmpty()); // fetch with offset -1 client.prepareResponse(offsetFetchResponse(tp, Errors.NONE.code(), "", -1L)); - assertEquals(0, coordinator.fetchOffsets(Collections.singleton(tp), time.milliseconds()).size()); + result = coordinator.fetchOffsets(Collections.singleton(tp), time.milliseconds()); + client.poll(0, time.milliseconds()); + assertTrue(result.isDone()); + assertTrue(result.value().isEmpty()); } private Struct consumerMetadataResponse(Node node, short error) { diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/FetcherTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/FetcherTest.java index 4195410..405efdc 100644 --- a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/FetcherTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/FetcherTest.java @@ -16,11 +16,10 @@ */ package org.apache.kafka.clients.consumer.internals; -import static org.junit.Assert.assertEquals; - import org.apache.kafka.clients.Metadata; import org.apache.kafka.clients.MockClient; import org.apache.kafka.clients.consumer.ConsumerRecord; +import org.apache.kafka.clients.consumer.OffsetResetStrategy; import org.apache.kafka.common.Cluster; import org.apache.kafka.common.Node; import org.apache.kafka.common.TopicPartition; @@ -30,10 +29,11 @@ import org.apache.kafka.common.protocol.types.Struct; import org.apache.kafka.common.record.CompressionType; import org.apache.kafka.common.record.MemoryRecords; import org.apache.kafka.common.requests.FetchResponse; -import org.apache.kafka.common.requests.ListOffsetResponse; import org.apache.kafka.common.serialization.ByteArrayDeserializer; import org.apache.kafka.common.utils.MockTime; import org.apache.kafka.test.TestUtils; +import org.junit.Before; +import org.junit.Test; import java.nio.ByteBuffer; import java.util.Collections; @@ -41,37 +41,33 @@ import java.util.LinkedHashMap; import java.util.List; import java.util.Map; -import org.junit.Before; -import org.junit.Test; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; public class FetcherTest { private String topicName = "test"; private String groupId = "test-group"; private TopicPartition tp = new TopicPartition(topicName, 0); - private long retryBackoffMs = 0L; private int minBytes = 1; private int maxWaitMs = 0; private int fetchSize = 1000; - private String offsetReset = "EARLIEST"; private MockTime time = new MockTime(); private MockClient client = new MockClient(time); private Metadata metadata = new Metadata(0, Long.MAX_VALUE); private Cluster cluster = TestUtils.singletonCluster(topicName, 1); private Node node = cluster.nodes().get(0); - private SubscriptionState subscriptions = new SubscriptionState(); + private SubscriptionState subscriptions = new SubscriptionState(OffsetResetStrategy.EARLIEST); private Metrics metrics = new Metrics(time); private Map metricTags = new LinkedHashMap(); private MemoryRecords records = MemoryRecords.emptyRecords(ByteBuffer.allocate(1024), CompressionType.NONE); private Fetcher fetcher = new Fetcher(client, - retryBackoffMs, minBytes, maxWaitMs, fetchSize, true, // check crc - offsetReset, new ByteArrayDeserializer(), new ByteArrayDeserializer(), metadata, @@ -140,11 +136,11 @@ public class FetcherTest { subscriptions.fetched(tp, 5); fetcher.initFetches(cluster, time.milliseconds()); client.respond(fetchResponse(this.records.buffer(), Errors.OFFSET_OUT_OF_RANGE.code(), 100L)); - 
client.prepareResponse(listOffsetResponse(Collections.singletonList(0L), Errors.NONE.code())); client.poll(0, time.milliseconds()); + assertTrue(subscriptions.isOffsetResetNeeded(tp)); assertEquals(0, fetcher.fetchedRecords().size()); - assertEquals(0L, (long) subscriptions.fetched(tp)); - assertEquals(0L, (long) subscriptions.consumed(tp)); + assertEquals(null, subscriptions.fetched(tp)); + assertEquals(null, subscriptions.consumed(tp)); } @Test @@ -157,11 +153,11 @@ public class FetcherTest { // fetch with out of range fetcher.initFetches(cluster, time.milliseconds()); client.respond(fetchResponse(this.records.buffer(), Errors.OFFSET_OUT_OF_RANGE.code(), 100L)); - client.prepareResponse(listOffsetResponse(Collections.singletonList(0L), Errors.NONE.code())); client.poll(0, time.milliseconds()); + assertTrue(subscriptions.isOffsetResetNeeded(tp)); assertEquals(0, fetcher.fetchedRecords().size()); - assertEquals(0L, (long) subscriptions.fetched(tp)); - assertEquals(0L, (long) subscriptions.consumed(tp)); + assertEquals(null, subscriptions.fetched(tp)); + assertEquals(null, subscriptions.consumed(tp)); } private Struct fetchResponse(ByteBuffer buffer, short error, long hw) { @@ -169,9 +165,5 @@ public class FetcherTest { return response.toStruct(); } - private Struct listOffsetResponse(List offsets, short error) { - ListOffsetResponse response = new ListOffsetResponse(Collections.singletonMap(tp, new ListOffsetResponse.PartitionData(error, offsets))); - return response.toStruct(); - } } diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/HeartbeatTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/HeartbeatTest.java index ecc78ce..ee1ede0 100644 --- a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/HeartbeatTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/HeartbeatTest.java @@ -20,6 +20,7 @@ import org.apache.kafka.common.utils.MockTime; import org.junit.Test; +import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertTrue; @@ -42,4 +43,12 @@ public class HeartbeatTest { time.sleep(timeout / (2 * Heartbeat.HEARTBEATS_PER_SESSION_INTERVAL)); assertFalse(heartbeat.shouldHeartbeat(time.milliseconds())); } + + @Test + public void testTimeToNextHeartbeat() { + heartbeat.sentHeartbeat(0); + assertEquals(100, heartbeat.timeToNextHeartbeat(0)); + assertEquals(0, heartbeat.timeToNextHeartbeat(100)); + assertEquals(0, heartbeat.timeToNextHeartbeat(200)); + } } diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/SubscriptionStateTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/SubscriptionStateTest.java index e000cf8..319751c 100644 --- a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/SubscriptionStateTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/SubscriptionStateTest.java @@ -22,12 +22,13 @@ import static java.util.Arrays.asList; import java.util.Collections; +import org.apache.kafka.clients.consumer.OffsetResetStrategy; import org.apache.kafka.common.TopicPartition; import org.junit.Test; public class SubscriptionStateTest { - private final SubscriptionState state = new SubscriptionState(); + private final SubscriptionState state = new SubscriptionState(OffsetResetStrategy.EARLIEST); private final TopicPartition tp0 = new TopicPartition("test", 0); private final TopicPartition tp1 = new TopicPartition("test", 1); @@ -43,7 
+44,21 @@ public class SubscriptionStateTest { assertTrue(state.assignedPartitions().isEmpty()); assertAllPositions(tp0, null); } - + + @Test + public void partitionReset() { + state.subscribe(tp0); + state.seek(tp0, 5); + assertEquals(5L, (long) state.fetched(tp0)); + assertEquals(5L, (long) state.consumed(tp0)); + state.needOffsetReset(tp0); + assertTrue(state.isOffsetResetNeeded()); + assertTrue(state.isOffsetResetNeeded(tp0)); + assertEquals(null, state.fetched(tp0)); + assertEquals(null, state.consumed(tp0)); + } + + @Test public void topicSubscription() { state.subscribe("test"); assertEquals(1, state.subscribedTopics().size()); diff --git a/clients/src/test/java/org/apache/kafka/common/utils/UtilsTest.java b/clients/src/test/java/org/apache/kafka/common/utils/UtilsTest.java index bf2b5bd..74ec52b 100755 --- a/clients/src/test/java/org/apache/kafka/common/utils/UtilsTest.java +++ b/clients/src/test/java/org/apache/kafka/common/utils/UtilsTest.java @@ -52,7 +52,7 @@ public class UtilsTest { assertEquals("[::1]:1234", formatAddress("::1", 1234)); assertEquals("[2001:db8:85a3:8d3:1319:8a2e:370:7348]:5678", formatAddress("2001:db8:85a3:8d3:1319:8a2e:370:7348", 5678)); } - + @Test public void testJoin() { assertEquals("", Utils.join(Collections.emptyList(), ",")); @@ -101,5 +101,11 @@ public class UtilsTest { this.subTest(buffer); } - -} \ No newline at end of file + @Test + public void testMin() { + assertEquals(1, Utils.min(1)); + assertEquals(1, Utils.min(1, 2, 3)); + assertEquals(1, Utils.min(2, 1, 3)); + assertEquals(1, Utils.min(2, 3, 1)); + } +} diff --git a/core/src/main/scala/kafka/admin/TopicCommand.scala b/core/src/main/scala/kafka/admin/TopicCommand.scala index dacbdd0..a2ecb96 100755 --- a/core/src/main/scala/kafka/admin/TopicCommand.scala +++ b/core/src/main/scala/kafka/admin/TopicCommand.scala @@ -27,8 +27,8 @@ import scala.collection._ import scala.collection.JavaConversions._ import kafka.log.LogConfig import kafka.consumer.Whitelist -import kafka.server.OffsetManager import org.apache.kafka.common.utils.Utils +import kafka.coordinator.ConsumerCoordinator object TopicCommand { @@ -111,7 +111,7 @@ object TopicCommand { println("Updated config for topic \"%s\".".format(topic)) } if(opts.options.has(opts.partitionsOpt)) { - if (topic == OffsetManager.OffsetsTopicName) { + if (topic == ConsumerCoordinator.OffsetsTopicName) { throw new IllegalArgumentException("The number of partitions for the offsets topic cannot be changed.") } println("WARNING: If partitions are increased for a topic that has a key, the partition " + diff --git a/core/src/main/scala/kafka/cluster/Partition.scala b/core/src/main/scala/kafka/cluster/Partition.scala index 6cb6477..2649090 100755 --- a/core/src/main/scala/kafka/cluster/Partition.scala +++ b/core/src/main/scala/kafka/cluster/Partition.scala @@ -22,7 +22,7 @@ import kafka.utils.CoreUtils.{inReadLock,inWriteLock} import kafka.admin.AdminUtils import kafka.api.{PartitionStateInfo, LeaderAndIsr} import kafka.log.LogConfig -import kafka.server.{TopicPartitionOperationKey, LogOffsetMetadata, OffsetManager, LogReadResult, ReplicaManager} +import kafka.server.{TopicPartitionOperationKey, LogOffsetMetadata, LogReadResult, ReplicaManager} import kafka.metrics.KafkaMetricsGroup import kafka.controller.KafkaController import kafka.message.ByteBufferMessageSet @@ -160,8 +160,7 @@ class Partition(val topic: String, * and setting the new leader and ISR */ def makeLeader(controllerId: Int, - partitionStateInfo: PartitionStateInfo, correlationId: Int, - 
offsetManager: OffsetManager): Boolean = { + partitionStateInfo: PartitionStateInfo, correlationId: Int): Boolean = { inWriteLock(leaderIsrUpdateLock) { val allReplicas = partitionStateInfo.allReplicas val leaderIsrAndControllerEpoch = partitionStateInfo.leaderIsrAndControllerEpoch @@ -186,8 +185,6 @@ class Partition(val topic: String, if (r.brokerId != localBrokerId) r.updateLogReadResult(LogReadResult.UnknownLogReadResult)) // we may need to increment high watermark since ISR could be down to 1 maybeIncrementLeaderHW(newLeaderReplica) - if (topic == OffsetManager.OffsetsTopicName) - offsetManager.loadOffsetsFromLog(partitionId) true } } @@ -198,7 +195,7 @@ class Partition(val topic: String, */ def makeFollower(controllerId: Int, partitionStateInfo: PartitionStateInfo, - correlationId: Int, offsetManager: OffsetManager): Boolean = { + correlationId: Int): Boolean = { inWriteLock(leaderIsrUpdateLock) { val allReplicas = partitionStateInfo.allReplicas val leaderIsrAndControllerEpoch = partitionStateInfo.leaderIsrAndControllerEpoch @@ -215,13 +212,6 @@ class Partition(val topic: String, leaderEpoch = leaderAndIsr.leaderEpoch zkVersion = leaderAndIsr.zkVersion - leaderReplicaIdOpt.foreach { leaderReplica => - if (topic == OffsetManager.OffsetsTopicName && - /* if we are making a leader->follower transition */ - leaderReplica == localBrokerId) - offsetManager.removeOffsetsFromCacheForPartition(partitionId) - } - if (leaderReplicaIdOpt.isDefined && leaderReplicaIdOpt.get == newLeaderBrokerId) { false } @@ -249,13 +239,12 @@ class Partition(val topic: String, TopicAndPartition(topic, partitionId))) case None => throw new NotAssignedReplicaException(("Leader %d failed to record follower %d's position %d since the replica" + - " is not recognized to be one of the assigned replicas %s for partition [%s,%d]") + " is not recognized to be one of the assigned replicas %s for partition %s.") .format(localBrokerId, replicaId, - logReadResult.info.fetchOffsetMetadata, + logReadResult.info.fetchOffsetMetadata.messageOffset, assignedReplicas().map(_.brokerId).mkString(","), - topic, - partitionId)) + TopicAndPartition(topic, partitionId))) } } diff --git a/core/src/main/scala/kafka/common/OffsetMetadataAndError.scala b/core/src/main/scala/kafka/common/OffsetMetadataAndError.scala index 6b4242c..deb48b1 100644 --- a/core/src/main/scala/kafka/common/OffsetMetadataAndError.scala +++ b/core/src/main/scala/kafka/common/OffsetMetadataAndError.scala @@ -17,6 +17,8 @@ package kafka.common +import org.apache.kafka.common.protocol.Errors + case class OffsetMetadata(offset: Long, metadata: String = OffsetMetadata.NoMetadata) { override def toString = "OffsetMetadata[%d,%s]" .format(offset, @@ -51,7 +53,7 @@ object OffsetAndMetadata { def apply(offset: Long) = new OffsetAndMetadata(OffsetMetadata(offset, OffsetMetadata.NoMetadata)) } -case class OffsetMetadataAndError(offsetMetadata: OffsetMetadata, error: Short = ErrorMapping.NoError) { +case class OffsetMetadataAndError(offsetMetadata: OffsetMetadata, error: Short = Errors.NONE.code) { def offset = offsetMetadata.offset def metadata = offsetMetadata.metadata @@ -60,10 +62,12 @@ case class OffsetMetadataAndError(offsetMetadata: OffsetMetadata, error: Short = } object OffsetMetadataAndError { - val NoOffset = OffsetMetadataAndError(OffsetMetadata.InvalidOffsetMetadata, ErrorMapping.NoError) - val OffsetsLoading = OffsetMetadataAndError(OffsetMetadata.InvalidOffsetMetadata, ErrorMapping.OffsetsLoadInProgressCode) - val UnknownTopicOrPartition = 
OffsetMetadataAndError(OffsetMetadata.InvalidOffsetMetadata, ErrorMapping.UnknownTopicOrPartitionCode) - val NotOffsetManagerForGroup = OffsetMetadataAndError(OffsetMetadata.InvalidOffsetMetadata, ErrorMapping.NotCoordinatorForConsumerCode) + val NoOffset = OffsetMetadataAndError(OffsetMetadata.InvalidOffsetMetadata, Errors.NONE.code) + val OffsetsLoading = OffsetMetadataAndError(OffsetMetadata.InvalidOffsetMetadata, Errors.OFFSET_LOAD_IN_PROGRESS.code) + val UnknownConsumer = OffsetMetadataAndError(OffsetMetadata.InvalidOffsetMetadata, Errors.UNKNOWN_CONSUMER_ID.code) + val NotCoordinatorForGroup = OffsetMetadataAndError(OffsetMetadata.InvalidOffsetMetadata, Errors.NOT_COORDINATOR_FOR_CONSUMER.code) + val UnknownTopicOrPartition = OffsetMetadataAndError(OffsetMetadata.InvalidOffsetMetadata, Errors.UNKNOWN_TOPIC_OR_PARTITION.code) + val IllegalGroupGenerationId = OffsetMetadataAndError(OffsetMetadata.InvalidOffsetMetadata, Errors.ILLEGAL_GENERATION.code) def apply(offset: Long) = new OffsetMetadataAndError(OffsetMetadata(offset, OffsetMetadata.NoMetadata), ErrorMapping.NoError) diff --git a/core/src/main/scala/kafka/common/Topic.scala b/core/src/main/scala/kafka/common/Topic.scala index ad75978..32595d6 100644 --- a/core/src/main/scala/kafka/common/Topic.scala +++ b/core/src/main/scala/kafka/common/Topic.scala @@ -18,7 +18,7 @@ package kafka.common import util.matching.Regex -import kafka.server.OffsetManager +import kafka.coordinator.ConsumerCoordinator object Topic { @@ -26,7 +26,7 @@ object Topic { private val maxNameLength = 255 private val rgx = new Regex(legalChars + "+") - val InternalTopics = Set(OffsetManager.OffsetsTopicName) + val InternalTopics = Set(ConsumerCoordinator.OffsetsTopicName) def validate(topic: String) { if (topic.length <= 0) diff --git a/core/src/main/scala/kafka/common/TopicAndPartition.scala b/core/src/main/scala/kafka/common/TopicAndPartition.scala index df3db91..13a3f28 100644 --- a/core/src/main/scala/kafka/common/TopicAndPartition.scala +++ b/core/src/main/scala/kafka/common/TopicAndPartition.scala @@ -1,6 +1,7 @@ package kafka.common import kafka.cluster.{Replica, Partition} +import kafka.utils.Json /** * Licensed to the Apache Software Foundation (ASF) under one or more @@ -24,6 +25,8 @@ import kafka.cluster.{Replica, Partition} */ case class TopicAndPartition(topic: String, partition: Int) { + private val version: Long = 1L + def this(tuple: (String, Int)) = this(tuple._1, tuple._2) def this(partition: Partition) = this(partition.topic, partition.partitionId) @@ -33,5 +36,6 @@ case class TopicAndPartition(topic: String, partition: Int) { def asTuple = (topic, partition) override def toString = "[%s,%d]".format(topic, partition) -} + def toJson = Json.encode(Map("version" -> version, "topic" -> topic, "partition" -> partition)) +} \ No newline at end of file diff --git a/core/src/main/scala/kafka/controller/KafkaController.scala b/core/src/main/scala/kafka/controller/KafkaController.scala index 3635057..20f1499 100755 --- a/core/src/main/scala/kafka/controller/KafkaController.scala +++ b/core/src/main/scala/kafka/controller/KafkaController.scala @@ -16,8 +16,9 @@ */ package kafka.controller -import collection._ -import collection.Set +import java.util + +import scala.collection._ import com.yammer.metrics.core.Gauge import java.util.concurrent.TimeUnit import kafka.admin.AdminUtils @@ -31,7 +32,7 @@ import kafka.utils.ZkUtils._ import kafka.utils._ import kafka.utils.CoreUtils._ import org.apache.zookeeper.Watcher.Event.KeeperState -import 
org.I0Itec.zkclient.{IZkDataListener, IZkStateListener, ZkClient} +import org.I0Itec.zkclient.{IZkChildListener, IZkDataListener, IZkStateListener, ZkClient} import org.I0Itec.zkclient.exception.{ZkNodeExistsException, ZkNoNodeException} import java.util.concurrent.atomic.AtomicInteger import java.util.concurrent.locks.ReentrantLock @@ -169,6 +170,7 @@ class KafkaController(val config : KafkaConfig, zkClient: ZkClient, val brokerSt private val partitionReassignedListener = new PartitionsReassignedListener(this) private val preferredReplicaElectionListener = new PreferredReplicaElectionListener(this) + private val isrChangeNotificationListener = new IsrChangeNotificationListener(this) newGauge( "ActiveControllerCount", @@ -307,6 +309,7 @@ class KafkaController(val config : KafkaConfig, zkClient: ZkClient, val brokerSt incrementControllerEpoch(zkClient) // before reading source of truth from zookeeper, register the listeners to get broker/topic callbacks registerReassignedPartitionsListener() + registerIsrChangeNotificationListener() registerPreferredReplicaElectionListener() partitionStateMachine.registerListeners() replicaStateMachine.registerListeners() @@ -339,6 +342,7 @@ class KafkaController(val config : KafkaConfig, zkClient: ZkClient, val brokerSt */ def onControllerResignation() { // de-register listeners + deregisterIsrChangeNotificationListener() deregisterReassignedPartitionsListener() deregisterPreferredReplicaElectionListener() @@ -792,8 +796,8 @@ class KafkaController(val config : KafkaConfig, zkClient: ZkClient, val brokerSt controllerContext.controllerChannelManager.startup() } - private def updateLeaderAndIsrCache() { - val leaderAndIsrInfo = ZkUtils.getPartitionLeaderAndIsrForTopics(zkClient, controllerContext.partitionReplicaAssignment.keySet) + def updateLeaderAndIsrCache(topicAndPartitions: Set[TopicAndPartition] = controllerContext.partitionReplicaAssignment.keySet) { + val leaderAndIsrInfo = ZkUtils.getPartitionLeaderAndIsrForTopics(zkClient, topicAndPartitions) for((topicPartition, leaderIsrAndControllerEpoch) <- leaderAndIsrInfo) controllerContext.partitionLeadershipInfo.put(topicPartition, leaderIsrAndControllerEpoch) } @@ -888,6 +892,17 @@ class KafkaController(val config : KafkaConfig, zkClient: ZkClient, val brokerSt } } + private def registerIsrChangeNotificationListener() = { + debug("Registering IsrChangeNotificationListener") + ZkUtils.makeSurePersistentPathExists(zkClient, ZkUtils.IsrChangeNotificationPath) + zkClient.subscribeChildChanges(ZkUtils.IsrChangeNotificationPath, isrChangeNotificationListener) + } + + private def deregisterIsrChangeNotificationListener() = { + debug("De-registering IsrChangeNotificationListener") + zkClient.unsubscribeChildChanges(ZkUtils.IsrChangeNotificationPath, isrChangeNotificationListener) + } + private def registerReassignedPartitionsListener() = { zkClient.subscribeDataChanges(ZkUtils.ReassignPartitionsPath, partitionReassignedListener) } @@ -1281,6 +1296,56 @@ class ReassignedPartitionsIsrChangeListener(controller: KafkaController, topic: } /** + * Called when leader intimates of isr change + * @param controller + */ +class IsrChangeNotificationListener(controller: KafkaController) extends IZkChildListener with Logging { + var topicAndPartitionSet: Set[TopicAndPartition] = Set() + + override def handleChildChange(parentPath: String, currentChildren: util.List[String]): Unit = { + import scala.collection.JavaConverters._ + + inLock(controller.controllerContext.controllerLock) { + debug("[IsrChangeNotificationListener] 
Fired!!!") + val childrenAsScala: mutable.Buffer[String] = currentChildren.asScala + val topicAndPartitions: immutable.Set[TopicAndPartition] = childrenAsScala.map(x => getTopicAndPartition(x)).flatten.toSet + controller.updateLeaderAndIsrCache(topicAndPartitions) + processUpdateNotifications(topicAndPartitions) + + // delete processed children + childrenAsScala.map(x => ZkUtils.deletePath(controller.controllerContext.zkClient, ZkUtils.TopicConfigChangesPath + "/" + x)) + } + } + + private def processUpdateNotifications(topicAndPartitions: immutable.Set[TopicAndPartition]) { + val liveBrokers: Seq[Int] = controller.controllerContext.liveOrShuttingDownBrokerIds.toSeq + controller.sendUpdateMetadataRequest(liveBrokers, topicAndPartitions) + debug("Sending MetadataRequest to Brokers:" + liveBrokers + " for TopicAndPartitions:" + topicAndPartitions) + } + + private def getTopicAndPartition(child: String): Option[TopicAndPartition] = { + val changeZnode: String = ZkUtils.IsrChangeNotificationPath + "/" + child + val (jsonOpt, stat) = ZkUtils.readDataMaybeNull(controller.controllerContext.zkClient, changeZnode) + if (jsonOpt.isDefined) { + val json = Json.parseFull(jsonOpt.get) + + json match { + case Some(m) => + val topicAndPartition = m.asInstanceOf[Map[String, Any]] + val topic = topicAndPartition("topic").asInstanceOf[String] + val partition = topicAndPartition("partition").asInstanceOf[Int] + Some(TopicAndPartition(topic, partition)) + case None => + error("Invalid topic and partition JSON: " + json + " in ZK: " + changeZnode) + None + } + } else { + None + } + } +} + +/** * Starts the preferred replica leader election for the list of partitions specified under * /admin/preferred_replica_election - */ diff --git a/core/src/main/scala/kafka/coordinator/ConsumerCoordinator.scala b/core/src/main/scala/kafka/coordinator/ConsumerCoordinator.scala index 51e89c8..6c2df4c 100644 --- a/core/src/main/scala/kafka/coordinator/ConsumerCoordinator.scala +++ b/core/src/main/scala/kafka/coordinator/ConsumerCoordinator.scala @@ -16,7 +16,9 @@ */ package kafka.coordinator -import kafka.common.TopicAndPartition +import kafka.common.{OffsetMetadataAndError, OffsetAndMetadata, TopicAndPartition} +import kafka.message.UncompressedCodec +import kafka.log.LogConfig import kafka.server._ import kafka.utils._ import org.apache.kafka.common.protocol.Errors @@ -24,7 +26,11 @@ import org.apache.kafka.common.requests.JoinGroupRequest import org.I0Itec.zkclient.ZkClient import java.util.concurrent.atomic.AtomicBoolean +import java.util.Properties +import scala.collection.{Map, Seq, immutable} +case class GroupManagerConfig(consumerMinSessionTimeoutMs: Int, + consumerMaxSessionTimeoutMs: Int) /** * ConsumerCoordinator handles consumer group and consumer offset management. @@ -33,11 +39,13 @@ import java.util.concurrent.atomic.AtomicBoolean * consumer groups. Consumer groups are assigned to coordinators based on their * group names. 
*/ -class ConsumerCoordinator(val config: KafkaConfig, - val zkClient: ZkClient, - val offsetManager: OffsetManager) extends Logging { +class ConsumerCoordinator(val brokerId: Int, + val groupConfig: GroupManagerConfig, + val offsetConfig: OffsetManagerConfig, + private val offsetManager: OffsetManager, + zkClient: ZkClient) extends Logging { - this.logIdent = "[ConsumerCoordinator " + config.brokerId + "]: " + this.logIdent = "[ConsumerCoordinator " + brokerId + "]: " private val isActive = new AtomicBoolean(false) @@ -45,9 +53,25 @@ class ConsumerCoordinator(val config: KafkaConfig, private var rebalancePurgatory: DelayedOperationPurgatory[DelayedRebalance] = null private var coordinatorMetadata: CoordinatorMetadata = null + def this(brokerId: Int, + groupConfig: GroupManagerConfig, + offsetConfig: OffsetManagerConfig, + replicaManager: ReplicaManager, + zkClient: ZkClient, + scheduler: KafkaScheduler) = this(brokerId, groupConfig, offsetConfig, + new OffsetManager(offsetConfig, replicaManager, zkClient, scheduler), zkClient) + + def offsetsTopicConfigs: Properties = { + val props = new Properties + props.put(LogConfig.CleanupPolicyProp, LogConfig.Compact) + props.put(LogConfig.SegmentBytesProp, offsetConfig.offsetsTopicSegmentBytes.toString) + props.put(LogConfig.CompressionTypeProp, UncompressedCodec.name) + props + } + /** - * NOTE: If a group lock and coordinatorLock are simultaneously needed, - * be sure to acquire the group lock before coordinatorLock to prevent deadlock + * NOTE: If a group lock and metadataLock are simultaneously needed, + * be sure to acquire the group lock before metadataLock to prevent deadlock */ /** @@ -55,9 +79,9 @@ class ConsumerCoordinator(val config: KafkaConfig, */ def startup() { info("Starting up.") - heartbeatPurgatory = new DelayedOperationPurgatory[DelayedHeartbeat]("Heartbeat", config.brokerId) - rebalancePurgatory = new DelayedOperationPurgatory[DelayedRebalance]("Rebalance", config.brokerId) - coordinatorMetadata = new CoordinatorMetadata(config, zkClient, maybePrepareRebalance) + heartbeatPurgatory = new DelayedOperationPurgatory[DelayedHeartbeat]("Heartbeat", brokerId) + rebalancePurgatory = new DelayedOperationPurgatory[DelayedRebalance]("Rebalance", brokerId) + coordinatorMetadata = new CoordinatorMetadata(brokerId, zkClient, maybePrepareRebalance) isActive.set(true) info("Startup complete.") } @@ -69,6 +93,7 @@ class ConsumerCoordinator(val config: KafkaConfig, def shutdown() { info("Shutting down.") isActive.set(false) + offsetManager.shutdown() coordinatorMetadata.shutdown() heartbeatPurgatory.shutdown() rebalancePurgatory.shutdown() @@ -87,7 +112,8 @@ class ConsumerCoordinator(val config: KafkaConfig, responseCallback(Set.empty, consumerId, 0, Errors.NOT_COORDINATOR_FOR_CONSUMER.code) } else if (!PartitionAssignor.strategies.contains(partitionAssignmentStrategy)) { responseCallback(Set.empty, consumerId, 0, Errors.UNKNOWN_PARTITION_ASSIGNMENT_STRATEGY.code) - } else if (sessionTimeoutMs < config.consumerMinSessionTimeoutMs || sessionTimeoutMs > config.consumerMaxSessionTimeoutMs) { + } else if (sessionTimeoutMs < groupConfig.consumerMinSessionTimeoutMs || + sessionTimeoutMs > groupConfig.consumerMaxSessionTimeoutMs) { responseCallback(Set.empty, consumerId, 0, Errors.INVALID_SESSION_TIMEOUT.code) } else { // only try to create the group if the group is not unknown AND @@ -184,7 +210,7 @@ class ConsumerCoordinator(val config: KafkaConfig, responseCallback(Errors.UNKNOWN_CONSUMER_ID.code) } else if (!group.has(consumerId)) { 
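[Editor's note] offsetsTopicConfigs above moves the offsets-topic overrides from OffsetManager into the coordinator. A minimal stand-alone sketch of the same idea (the property keys follow Kafka's LogConfig names; the segment-size default here is only illustrative):

    import java.util.Properties

    object OffsetsTopicConfigSketch {
      def offsetsTopicConfigs(offsetsTopicSegmentBytes: Int = 100 * 1024 * 1024): Properties = {
        val props = new Properties()
        props.put("cleanup.policy", "compact")        // keep only the latest commit per (group, topic, partition) key
        props.put("segment.bytes", offsetsTopicSegmentBytes.toString) // smaller segments let compaction run sooner
        props.put("compression.type", "uncompressed") // offset messages are appended uncompressed
        props
      }
    }
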
responseCallback(Errors.UNKNOWN_CONSUMER_ID.code) - } else if (generationId != group.generationId) { + } else if (generationId != group.generationId || !group.is(Stable)) { responseCallback(Errors.ILLEGAL_GENERATION.code) } else { val consumer = group.get(consumerId) @@ -196,6 +222,75 @@ class ConsumerCoordinator(val config: KafkaConfig, } } + def handleCommitOffsets(groupId: String, + consumerId: String, + generationId: Int, + offsetMetadata: immutable.Map[TopicAndPartition, OffsetAndMetadata], + responseCallback: immutable.Map[TopicAndPartition, Short] => Unit) { + if (!isActive.get) { + responseCallback(offsetMetadata.mapValues(_ => Errors.CONSUMER_COORDINATOR_NOT_AVAILABLE.code)) + } else if (!isCoordinatorForGroup(groupId)) { + responseCallback(offsetMetadata.mapValues(_ => Errors.NOT_COORDINATOR_FOR_CONSUMER.code)) + } else { + val group = coordinatorMetadata.getGroup(groupId) + if (group == null) { + // if the group does not exist, it means this group is not relying + // on Kafka for partition management, and hence never send join-group + // request to the coordinator before; in this case blindly commit the offsets + offsetManager.storeOffsets(groupId, consumerId, generationId, offsetMetadata, responseCallback) + } else { + group synchronized { + if (group.is(Dead)) { + responseCallback(offsetMetadata.mapValues(_ => Errors.UNKNOWN_CONSUMER_ID.code)) + } else if (!group.has(consumerId)) { + responseCallback(offsetMetadata.mapValues(_ => Errors.UNKNOWN_CONSUMER_ID.code)) + } else if (generationId != group.generationId) { + responseCallback(offsetMetadata.mapValues(_ => Errors.ILLEGAL_GENERATION.code)) + } else if (!offsetMetadata.keySet.subsetOf(group.get(consumerId).assignedTopicPartitions)) { + responseCallback(offsetMetadata.mapValues(_ => Errors.COMMITTING_PARTITIONS_NOT_ASSIGNED.code)) + } else { + offsetManager.storeOffsets(groupId, consumerId, generationId, offsetMetadata, responseCallback) + } + } + } + } + } + + def handleFetchOffsets(groupId: String, + partitions: Seq[TopicAndPartition]): Map[TopicAndPartition, OffsetMetadataAndError] = { + if (!isActive.get) { + partitions.map {case topicAndPartition => (topicAndPartition, OffsetMetadataAndError.NotCoordinatorForGroup)}.toMap + } else if (!isCoordinatorForGroup(groupId)) { + partitions.map {case topicAndPartition => (topicAndPartition, OffsetMetadataAndError.NotCoordinatorForGroup)}.toMap + } else { + val group = coordinatorMetadata.getGroup(groupId) + if (group == null) { + // if the group does not exist, it means this group is not relying + // on Kafka for partition management, and hence never send join-group + // request to the coordinator before; in this case blindly fetch the offsets + offsetManager.getOffsets(groupId, partitions) + } else { + group synchronized { + if (group.is(Dead)) { + partitions.map {case topicAndPartition => (topicAndPartition, OffsetMetadataAndError.UnknownConsumer)}.toMap + } else { + offsetManager.getOffsets(groupId, partitions) + } + } + } + } + } + + def handleGroupImmigration(offsetTopicPartitionId: Int) = { + // TODO we may need to add more logic in KAFKA-2017 + offsetManager.loadOffsetsFromLog(offsetTopicPartitionId) + } + + def handleGroupEmigration(offsetTopicPartitionId: Int) = { + // TODO we may need to add more logic in KAFKA-2017 + offsetManager.removeOffsetsFromCacheForPartition(offsetTopicPartitionId) + } + /** * Complete existing DelayedHeartbeats for the given consumer and schedule the next one */ @@ -246,8 +341,7 @@ class ConsumerCoordinator(val config: KafkaConfig, private def 
prepareRebalance(group: ConsumerGroupMetadata) { group.transitionTo(PreparingRebalance) - group.generationId += 1 - info("Preparing to rebalance group %s generation %s".format(group.groupId, group.generationId)) + info("Preparing to rebalance group %s with old generation %s".format(group.groupId, group.generationId)) val rebalanceTimeout = group.rebalanceTimeout val delayedRebalance = new DelayedRebalance(this, group, rebalanceTimeout) @@ -259,7 +353,9 @@ class ConsumerCoordinator(val config: KafkaConfig, assert(group.notYetRejoinedConsumers == List.empty[ConsumerMetadata]) group.transitionTo(Rebalancing) - info("Rebalancing group %s generation %s".format(group.groupId, group.generationId)) + group.generationId += 1 + + info("Rebalancing group %s with new generation %s".format(group.groupId, group.generationId)) val assignedPartitionsPerConsumer = reassignPartitions(group) trace("Rebalance for group %s generation %s has assigned partitions: %s" @@ -275,8 +371,6 @@ class ConsumerCoordinator(val config: KafkaConfig, maybePrepareRebalance(group) } - private def isCoordinatorForGroup(groupId: String) = offsetManager.leaderIsLocal(offsetManager.partitionFor(groupId)) - private def reassignPartitions(group: ConsumerGroupMetadata) = { val assignor = PartitionAssignor.createInstance(group.partitionAssignmentStrategy) val topicsPerConsumer = group.topicsPerConsumer @@ -345,8 +439,54 @@ class ConsumerCoordinator(val config: KafkaConfig, } } - def onCompleteHeartbeat() {} + def onCompleteHeartbeat() { + // TODO: add metrics for complete heartbeats + } + + def partitionFor(group: String): Int = offsetManager.partitionFor(group) private def shouldKeepConsumerAlive(consumer: ConsumerMetadata, heartbeatDeadline: Long) = consumer.awaitingRebalanceCallback != null || consumer.latestHeartbeat + consumer.sessionTimeoutMs > heartbeatDeadline + + private def isCoordinatorForGroup(groupId: String) = offsetManager.leaderIsLocal(offsetManager.partitionFor(groupId)) +} + +object ConsumerCoordinator { + + val OffsetsTopicName = "__consumer_offsets" + + def create(config: KafkaConfig, + zkClient: ZkClient, + replicaManager: ReplicaManager, + kafkaScheduler: KafkaScheduler): ConsumerCoordinator = { + val offsetConfig = OffsetManagerConfig(maxMetadataSize = config.offsetMetadataMaxSize, + loadBufferSize = config.offsetsLoadBufferSize, + offsetsRetentionMs = config.offsetsRetentionMinutes * 60 * 1000L, + offsetsRetentionCheckIntervalMs = config.offsetsRetentionCheckIntervalMs, + offsetsTopicNumPartitions = config.offsetsTopicPartitions, + offsetsTopicReplicationFactor = config.offsetsTopicReplicationFactor, + offsetCommitTimeoutMs = config.offsetCommitTimeoutMs, + offsetCommitRequiredAcks = config.offsetCommitRequiredAcks) + val groupConfig = GroupManagerConfig(consumerMinSessionTimeoutMs = config.consumerMinSessionTimeoutMs, + consumerMaxSessionTimeoutMs = config.consumerMaxSessionTimeoutMs) + + new ConsumerCoordinator(config.brokerId, groupConfig, offsetConfig, replicaManager, zkClient, kafkaScheduler) + } + + def create(config: KafkaConfig, + zkClient: ZkClient, + offsetManager: OffsetManager): ConsumerCoordinator = { + val offsetConfig = OffsetManagerConfig(maxMetadataSize = config.offsetMetadataMaxSize, + loadBufferSize = config.offsetsLoadBufferSize, + offsetsRetentionMs = config.offsetsRetentionMinutes * 60 * 1000L, + offsetsRetentionCheckIntervalMs = config.offsetsRetentionCheckIntervalMs, + offsetsTopicNumPartitions = config.offsetsTopicPartitions, + offsetsTopicReplicationFactor = 
config.offsetsTopicReplicationFactor, + offsetCommitTimeoutMs = config.offsetCommitTimeoutMs, + offsetCommitRequiredAcks = config.offsetCommitRequiredAcks) + val groupConfig = GroupManagerConfig(consumerMinSessionTimeoutMs = config.consumerMinSessionTimeoutMs, + consumerMaxSessionTimeoutMs = config.consumerMaxSessionTimeoutMs) + + new ConsumerCoordinator(config.brokerId, groupConfig, offsetConfig, offsetManager, zkClient) + } } diff --git a/core/src/main/scala/kafka/coordinator/CoordinatorMetadata.scala b/core/src/main/scala/kafka/coordinator/CoordinatorMetadata.scala index c39e6de..2920320 100644 --- a/core/src/main/scala/kafka/coordinator/CoordinatorMetadata.scala +++ b/core/src/main/scala/kafka/coordinator/CoordinatorMetadata.scala @@ -32,13 +32,13 @@ import scala.collection.mutable * It delegates all group logic to the callers. */ @threadsafe -private[coordinator] class CoordinatorMetadata(config: KafkaConfig, +private[coordinator] class CoordinatorMetadata(brokerId: Int, zkClient: ZkClient, maybePrepareRebalance: ConsumerGroupMetadata => Unit) { /** - * NOTE: If a group lock and coordinatorLock are simultaneously needed, - * be sure to acquire the group lock before coordinatorLock to prevent deadlock + * NOTE: If a group lock and metadataLock are simultaneously needed, + * be sure to acquire the group lock before metadataLock to prevent deadlock */ private val metadataLock = new ReentrantReadWriteLock() @@ -179,7 +179,7 @@ private[coordinator] class CoordinatorMetadata(config: KafkaConfig, * Zookeeper listener to handle topic partition changes */ class TopicPartitionChangeListener extends IZkDataListener with Logging { - this.logIdent = "[TopicPartitionChangeListener on Coordinator " + config.brokerId + "]: " + this.logIdent = "[TopicPartitionChangeListener on Coordinator " + brokerId + "]: " override def handleDataChange(dataPath: String, data: Object) { info("Handling data change for path: %s data: %s".format(dataPath, data)) diff --git a/core/src/main/scala/kafka/log/LogCleaner.scala b/core/src/main/scala/kafka/log/LogCleaner.scala index d07a391..b36ea0d 100644 --- a/core/src/main/scala/kafka/log/LogCleaner.scala +++ b/core/src/main/scala/kafka/log/LogCleaner.scala @@ -559,11 +559,17 @@ private[log] class Cleaner(val id: Int, // but we may be able to fit more (if there is lots of duplication in the dirty section of the log) var offset = dirty.head.baseOffset require(offset == start, "Last clean offset is %d but segment base offset is %d for log %s.".format(start, offset, log.name)) - val minStopOffset = (start + map.slots * this.dupBufferLoadFactor).toLong - for (segment <- dirty) { + val maxDesiredMapSize = (map.slots * this.dupBufferLoadFactor).toInt + var full = false + for (segment <- dirty if !full) { checkDone(log.topicAndPartition) - if(segment.baseOffset <= minStopOffset || map.utilization < this.dupBufferLoadFactor) + val segmentSize = segment.nextOffset() - segment.baseOffset + + require(segmentSize <= maxDesiredMapSize, "%d messages in segment %s/%s but offset map can fit only %d. 
You can increase log.cleaner.dedupe.buffer.size or decrease log.cleaner.threads".format(segmentSize, log.name, segment.log.file.getName, maxDesiredMapSize)) + if (map.size + segmentSize <= maxDesiredMapSize) offset = buildOffsetMapForSegment(log.topicAndPartition, segment, map) + else + full = true } info("Offset map for log %s complete.".format(log.name)) offset diff --git a/core/src/main/scala/kafka/log/OffsetIndex.scala b/core/src/main/scala/kafka/log/OffsetIndex.scala index a1082ae..332d5e2 100755 --- a/core/src/main/scala/kafka/log/OffsetIndex.scala +++ b/core/src/main/scala/kafka/log/OffsetIndex.scala @@ -274,7 +274,7 @@ class OffsetIndex(@volatile var file: File, val baseOffset: Long, val maxIndexSi */ def resize(newSize: Int) { inLock(lock) { - val raf = new RandomAccessFile(file, "rws") + val raf = new RandomAccessFile(file, "rw") val roundedNewSize = roundToExactMultiple(newSize, 8) val position = this.mmap.position diff --git a/core/src/main/scala/kafka/producer/KafkaLog4jAppender.scala b/core/src/main/scala/kafka/producer/KafkaLog4jAppender.scala deleted file mode 100644 index 5d36a01..0000000 --- a/core/src/main/scala/kafka/producer/KafkaLog4jAppender.scala +++ /dev/null @@ -1,97 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
-*/ - -package kafka.producer - -import async.MissingConfigException -import org.apache.log4j.spi.LoggingEvent -import org.apache.log4j.AppenderSkeleton -import org.apache.log4j.helpers.LogLog -import kafka.utils.Logging -import java.util.{Properties, Date} -import org.apache.kafka.clients.producer.{KafkaProducer, ProducerRecord} - -class KafkaLog4jAppender extends AppenderSkeleton with Logging { - var topic: String = null - var brokerList: String = null - var compressionType: String = null - var retries: Int = 0 - var requiredNumAcks: Int = Int.MaxValue - var syncSend: Boolean = false - - private var producer: KafkaProducer[Array[Byte],Array[Byte]] = null - - def getTopic: String = topic - def setTopic(topic: String) { this.topic = topic } - - def getBrokerList: String = brokerList - def setBrokerList(brokerList: String) { this.brokerList = brokerList } - - def getCompressionType: String = compressionType - def setCompressionType(compressionType: String) { this.compressionType = compressionType } - - def getRequiredNumAcks: Int = requiredNumAcks - def setRequiredNumAcks(requiredNumAcks: Int) { this.requiredNumAcks = requiredNumAcks } - - def getSyncSend: Boolean = syncSend - def setSyncSend(syncSend: Boolean) { this.syncSend = syncSend } - - def getRetries: Int = retries - def setRetries(retries: Int) { this.retries = retries } - - override def activateOptions() { - // check for config parameter validity - val props = new Properties() - if(brokerList != null) - props.put(org.apache.kafka.clients.producer.ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, brokerList) - if(props.isEmpty) - throw new MissingConfigException("The bootstrap servers property should be specified") - if(topic == null) - throw new MissingConfigException("topic must be specified by the Kafka log4j appender") - if(compressionType != null) props.put(org.apache.kafka.clients.producer.ProducerConfig.COMPRESSION_TYPE_CONFIG, compressionType) - if(requiredNumAcks != Int.MaxValue) props.put(org.apache.kafka.clients.producer.ProducerConfig.ACKS_CONFIG, requiredNumAcks.toString) - if(retries > 0) props.put(org.apache.kafka.clients.producer.ProducerConfig.RETRIES_CONFIG, retries.toString) - props.put(org.apache.kafka.clients.producer.ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.ByteArraySerializer") - props.put(org.apache.kafka.clients.producer.ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.ByteArraySerializer") - producer = new KafkaProducer[Array[Byte],Array[Byte]](props) - LogLog.debug("Kafka producer connected to " + brokerList) - LogLog.debug("Logging for topic: " + topic) - } - - override def append(event: LoggingEvent) { - val message = subAppend(event) - LogLog.debug("[" + new Date(event.getTimeStamp).toString + "]" + message) - val response = producer.send(new ProducerRecord[Array[Byte],Array[Byte]](topic, message.getBytes())) - if (syncSend) response.get - } - - def subAppend(event: LoggingEvent): String = { - if(this.layout == null) - event.getRenderedMessage - else - this.layout.format(event) - } - - override def close() { - if(!this.closed) { - this.closed = true - producer.close() - } - } - - override def requiresLayout: Boolean = true -} diff --git a/core/src/main/scala/kafka/server/KafkaApis.scala b/core/src/main/scala/kafka/server/KafkaApis.scala index ad6f058..18f5b5b 100644 --- a/core/src/main/scala/kafka/server/KafkaApis.scala +++ b/core/src/main/scala/kafka/server/KafkaApis.scala @@ -37,7 +37,6 @@ import org.I0Itec.zkclient.ZkClient */ 
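[Editor's note] The KafkaApis change below is where the hooks removed from Partition.makeLeader/makeFollower resurface: leader and follower transitions for __consumer_offsets partitions are now forwarded to the coordinator. A stand-alone sketch of that dispatch, with invented names standing in for the real classes:

    object MigrationDispatchSketch {
      val OffsetsTopicName = "__consumer_offsets"

      case class PartitionSketch(topic: String, partitionId: Int, leaderReplicaId: Option[Int])

      trait CoordinatorSketch {
        def handleGroupImmigration(offsetsPartition: Int): Unit
        def handleGroupEmigration(offsetsPartition: Int): Unit
      }

      def dispatch(brokerId: Int,
                   updatedLeaders: Set[PartitionSketch],
                   updatedFollowers: Set[PartitionSketch],
                   coordinator: CoordinatorSketch): Unit = {
        // this broker just became leader of an offsets partition: load its offsets into the cache
        updatedLeaders
          .filter(_.topic == OffsetsTopicName)
          .foreach(p => coordinator.handleGroupImmigration(p.partitionId))

        // this broker just became a follower after its replica had been the leader:
        // drop the cached offsets for that partition (mirrors the check in the patch)
        updatedFollowers
          .filter(p => p.topic == OffsetsTopicName && p.leaderReplicaId.contains(brokerId))
          .foreach(p => coordinator.handleGroupEmigration(p.partitionId))
      }
    }
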
class KafkaApis(val requestChannel: RequestChannel, val replicaManager: ReplicaManager, - val offsetManager: OffsetManager, val coordinator: ConsumerCoordinator, val controller: KafkaController, val zkClient: ZkClient, @@ -95,8 +94,23 @@ class KafkaApis(val requestChannel: RequestChannel, // stop serving data to clients for the topic being deleted val leaderAndIsrRequest = request.requestObj.asInstanceOf[LeaderAndIsrRequest] try { - val (response, error) = replicaManager.becomeLeaderOrFollower(leaderAndIsrRequest, offsetManager) - val leaderAndIsrResponse = new LeaderAndIsrResponse(leaderAndIsrRequest.correlationId, response, error) + // call replica manager to handle updating partitions to become leader or follower + val result = replicaManager.becomeLeaderOrFollower(leaderAndIsrRequest) + val leaderAndIsrResponse = new LeaderAndIsrResponse(leaderAndIsrRequest.correlationId, result.responseMap, result.errorCode) + // for each new leader or follower, call coordinator to handle + // consumer group migration + result.updatedLeaders.foreach { case partition => + if (partition.topic == ConsumerCoordinator.OffsetsTopicName) + coordinator.handleGroupImmigration(partition.partitionId) + } + result.updatedFollowers.foreach { case partition => + partition.leaderReplicaIdOpt.foreach { leaderReplica => + if (partition.topic == ConsumerCoordinator.OffsetsTopicName && + leaderReplica == brokerId) + coordinator.handleGroupEmigration(partition.partitionId) + } + } + requestChannel.sendResponse(new Response(request, new RequestOrResponseSend(request.connectionId, leaderAndIsrResponse))) } catch { case e: KafkaStorageException => @@ -142,6 +156,12 @@ class KafkaApis(val requestChannel: RequestChannel, def handleOffsetCommitRequest(request: RequestChannel.Request) { val offsetCommitRequest = request.requestObj.asInstanceOf[OffsetCommitRequest] + // filter non-exist topics + val invalidRequestsInfo = offsetCommitRequest.requestInfo.filter { case (topicAndPartition, offsetMetadata) => + !metadataCache.contains(topicAndPartition.topic) + } + val filteredRequestInfo = (offsetCommitRequest.requestInfo -- invalidRequestsInfo.keys) + // the callback for sending an offset commit response def sendResponseCallback(commitStatus: immutable.Map[TopicAndPartition, Short]) { commitStatus.foreach { case (topicAndPartition, errorCode) => @@ -154,14 +174,14 @@ class KafkaApis(val requestChannel: RequestChannel, topicAndPartition, ErrorMapping.exceptionNameFor(errorCode))) } } - - val response = OffsetCommitResponse(commitStatus, offsetCommitRequest.correlationId) + val combinedCommitStatus = commitStatus ++ invalidRequestsInfo.map(_._1 -> ErrorMapping.UnknownTopicOrPartitionCode) + val response = OffsetCommitResponse(combinedCommitStatus, offsetCommitRequest.correlationId) requestChannel.sendResponse(new RequestChannel.Response(request, new RequestOrResponseSend(request.connectionId, response))) } if (offsetCommitRequest.versionId == 0) { // for version 0 always store offsets to ZK - val responseInfo = offsetCommitRequest.requestInfo.map { + val responseInfo = filteredRequestInfo.map { case (topicAndPartition, metaAndError) => { val topicDirs = new ZKGroupTopicDirs(offsetCommitRequest.groupId, topicAndPartition.topic) try { @@ -189,7 +209,7 @@ class KafkaApis(val requestChannel: RequestChannel, val offsetRetention = if (offsetCommitRequest.versionId <= 1 || offsetCommitRequest.retentionMs == org.apache.kafka.common.requests.OffsetCommitRequest.DEFAULT_RETENTION_TIME) { - offsetManager.config.offsetsRetentionMs + 
coordinator.offsetConfig.offsetsRetentionMs } else { offsetCommitRequest.retentionMs } @@ -203,7 +223,7 @@ class KafkaApis(val requestChannel: RequestChannel, val currentTimestamp = SystemTime.milliseconds val defaultExpireTimestamp = offsetRetention + currentTimestamp - val offsetData = offsetCommitRequest.requestInfo.mapValues(offsetAndMetadata => + val offsetData = filteredRequestInfo.mapValues(offsetAndMetadata => offsetAndMetadata.copy( commitTimestamp = currentTimestamp, expireTimestamp = { @@ -215,8 +235,8 @@ class KafkaApis(val requestChannel: RequestChannel, ) ) - // call offset manager to store offsets - offsetManager.storeOffsets( + // call coordinator to handle commit offset + coordinator.handleCommitOffsets( offsetCommitRequest.groupId, offsetCommitRequest.consumerId, offsetCommitRequest.groupGenerationId, @@ -422,9 +442,9 @@ class KafkaApis(val requestChannel: RequestChannel, if (topics.size > 0 && topicResponses.size != topics.size) { val nonExistentTopics = topics -- topicResponses.map(_.topic).toSet val responsesForNonExistentTopics = nonExistentTopics.map { topic => - if (topic == OffsetManager.OffsetsTopicName || config.autoCreateTopicsEnable) { + if (topic == ConsumerCoordinator.OffsetsTopicName || config.autoCreateTopicsEnable) { try { - if (topic == OffsetManager.OffsetsTopicName) { + if (topic == ConsumerCoordinator.OffsetsTopicName) { val aliveBrokers = metadataCache.getAliveBrokers val offsetsTopicReplicationFactor = if (aliveBrokers.length > 0) @@ -433,7 +453,7 @@ class KafkaApis(val requestChannel: RequestChannel, config.offsetsTopicReplicationFactor.toInt AdminUtils.createTopic(zkClient, topic, config.offsetsTopicPartitions, offsetsTopicReplicationFactor, - offsetManager.offsetsTopicConfig) + coordinator.offsetsTopicConfigs) info("Auto creation of topic %s with %d partitions and replication factor %d is successful!" .format(topic, config.offsetsTopicPartitions, offsetsTopicReplicationFactor)) } @@ -496,26 +516,19 @@ class KafkaApis(val requestChannel: RequestChannel, OffsetFetchResponse(collection.immutable.Map(responseInfo: _*), offsetFetchRequest.correlationId) } else { - // version 1 reads offsets from Kafka - val (unknownTopicPartitions, knownTopicPartitions) = offsetFetchRequest.requestInfo.partition(topicAndPartition => - metadataCache.getPartitionInfo(topicAndPartition.topic, topicAndPartition.partition).isEmpty - ) - val unknownStatus = unknownTopicPartitions.map(topicAndPartition => (topicAndPartition, OffsetMetadataAndError.UnknownTopicOrPartition)).toMap - val knownStatus = - if (knownTopicPartitions.size > 0) - offsetManager.getOffsets(offsetFetchRequest.groupId, knownTopicPartitions).toMap - else - Map.empty[TopicAndPartition, OffsetMetadataAndError] - val status = unknownStatus ++ knownStatus + // version 1 reads offsets from Kafka; + val offsets = coordinator.handleFetchOffsets(offsetFetchRequest.groupId, offsetFetchRequest.requestInfo).toMap - OffsetFetchResponse(status, offsetFetchRequest.correlationId) + // Note that we do not need to filter the partitions in the + // metadata cache as the topic partitions will be filtered + // in coordinator's offset manager through the offset cache + OffsetFetchResponse(offsets, offsetFetchRequest.correlationId) } trace("Sending offset fetch response %s for correlation id %d to client %s." 
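[Editor's note] Both the offset-fetch path above and the consumer-metadata path below key off which offsets-topic partition a group hashes to. A self-contained sketch of that mapping (mirroring Kafka's Utils.abs, which masks the sign bit rather than using math.abs so even Int.MinValue hash codes stay non-negative; the 50-partition default is only assumed here):

    object GroupPartitionSketch {
      def partitionFor(group: String, offsetsTopicPartitions: Int = 50): Int =
        (group.hashCode & 0x7fffffff) % offsetsTopicPartitions // sign-bit mask, never negative

      def main(args: Array[String]): Unit =
        println(s"group 'my-group' -> offsets partition ${partitionFor("my-group")}")
    }
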
.format(response, offsetFetchRequest.correlationId, offsetFetchRequest.clientId)) requestChannel.sendResponse(new RequestChannel.Response(request, new RequestOrResponseSend(request.connectionId, response))) - } /* @@ -524,10 +537,10 @@ class KafkaApis(val requestChannel: RequestChannel, def handleConsumerMetadataRequest(request: RequestChannel.Request) { val consumerMetadataRequest = request.requestObj.asInstanceOf[ConsumerMetadataRequest] - val partition = offsetManager.partitionFor(consumerMetadataRequest.group) + val partition = coordinator.partitionFor(consumerMetadataRequest.group) // get metadata (and create the topic if necessary) - val offsetsTopicMetadata = getTopicMetadata(Set(OffsetManager.OffsetsTopicName), request.securityProtocol).head + val offsetsTopicMetadata = getTopicMetadata(Set(ConsumerCoordinator.OffsetsTopicName), request.securityProtocol).head val errorResponse = ConsumerMetadataResponse(None, ErrorMapping.ConsumerCoordinatorNotAvailableCode, consumerMetadataRequest.correlationId) diff --git a/core/src/main/scala/kafka/server/KafkaConfig.scala b/core/src/main/scala/kafka/server/KafkaConfig.scala index 755fb85..08c1d3f 100755 --- a/core/src/main/scala/kafka/server/KafkaConfig.scala +++ b/core/src/main/scala/kafka/server/KafkaConfig.scala @@ -816,8 +816,7 @@ case class KafkaConfig (props: java.util.Map[_, _]) extends AbstractConfig(Kafka validateUniquePortAndProtocol(getString(KafkaConfig.AdvertisedListenersProp)) CoreUtils.listenerListToEndPoints(getString(KafkaConfig.AdvertisedListenersProp)) } else if (getString(KafkaConfig.AdvertisedHostNameProp) != null || getInt(KafkaConfig.AdvertisedPortProp) != null) { - CoreUtils.listenerListToEndPoints("PLAINTEXT://" + - getString(KafkaConfig.AdvertisedHostNameProp) + ":" + getInt(KafkaConfig.AdvertisedPortProp)) + CoreUtils.listenerListToEndPoints("PLAINTEXT://" + advertisedHostName + ":" + advertisedPort) } else { getListeners() } diff --git a/core/src/main/scala/kafka/server/KafkaServer.scala b/core/src/main/scala/kafka/server/KafkaServer.scala index 603dd52..76ac548 100755 --- a/core/src/main/scala/kafka/server/KafkaServer.scala +++ b/core/src/main/scala/kafka/server/KafkaServer.scala @@ -42,7 +42,7 @@ import kafka.common.{ErrorMapping, InconsistentBrokerIdException, GenerateBroker import kafka.network.{BlockingChannel, SocketServer} import kafka.metrics.KafkaMetricsGroup import com.yammer.metrics.core.Gauge -import kafka.coordinator.ConsumerCoordinator +import kafka.coordinator.{GroupManagerConfig, ConsumerCoordinator} /** * Represents the lifecycle of a single Kafka broker. 
Handles all functionality required @@ -63,8 +63,6 @@ class KafkaServer(val config: KafkaConfig, time: Time = SystemTime) extends Logg var logManager: LogManager = null - var offsetManager: OffsetManager = null - var replicaManager: ReplicaManager = null var topicConfigManager: TopicConfigManager = null @@ -130,19 +128,16 @@ class KafkaServer(val config: KafkaConfig, time: Time = SystemTime) extends Logg replicaManager = new ReplicaManager(config, time, zkClient, kafkaScheduler, logManager, isShuttingDown) replicaManager.startup() - /* start offset manager */ - offsetManager = createOffsetManager() - /* start kafka controller */ kafkaController = new KafkaController(config, zkClient, brokerState) kafkaController.startup() /* start kafka coordinator */ - consumerCoordinator = new ConsumerCoordinator(config, zkClient, offsetManager) + consumerCoordinator = ConsumerCoordinator.create(config, zkClient, replicaManager, kafkaScheduler) consumerCoordinator.startup() /* start processing requests */ - apis = new KafkaApis(socketServer.requestChannel, replicaManager, offsetManager, consumerCoordinator, + apis = new KafkaApis(socketServer.requestChannel, replicaManager, consumerCoordinator, kafkaController, zkClient, config.brokerId, config, metadataCache) requestHandlerPool = new KafkaRequestHandlerPool(config.brokerId, socketServer.requestChannel, apis, config.numIoThreads) brokerState.newState(RunningAsBroker) @@ -322,8 +317,6 @@ class KafkaServer(val config: KafkaConfig, time: Time = SystemTime) extends Logg CoreUtils.swallow(socketServer.shutdown()) if(requestHandlerPool != null) CoreUtils.swallow(requestHandlerPool.shutdown()) - if(offsetManager != null) - offsetManager.shutdown() CoreUtils.swallow(kafkaScheduler.shutdown()) if(apis != null) CoreUtils.swallow(apis.close()) @@ -423,19 +416,6 @@ class KafkaServer(val config: KafkaConfig, time: Time = SystemTime) extends Logg logProps } - private def createOffsetManager(): OffsetManager = { - val offsetManagerConfig = OffsetManagerConfig( - maxMetadataSize = config.offsetMetadataMaxSize, - loadBufferSize = config.offsetsLoadBufferSize, - offsetsRetentionMs = config.offsetsRetentionMinutes * 60 * 1000L, - offsetsRetentionCheckIntervalMs = config.offsetsRetentionCheckIntervalMs, - offsetsTopicNumPartitions = config.offsetsTopicPartitions, - offsetsTopicReplicationFactor = config.offsetsTopicReplicationFactor, - offsetCommitTimeoutMs = config.offsetCommitTimeoutMs, - offsetCommitRequiredAcks = config.offsetCommitRequiredAcks) - new OffsetManager(offsetManagerConfig, replicaManager, zkClient, kafkaScheduler, metadataCache) - } - /** * Generates new brokerId or reads from meta.properties based on following conditions *

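[Editor's note] Before the OffsetManager changes that follow, note how the commit path is now split: the coordinator performs the group-level checks (handleCommitOffsets above) while OffsetManager keeps only the metadata-size check. A stand-alone sketch of that group-level decision ladder, with error names rendered as strings purely for illustration:

    object CommitCheckSketch {
      case class GroupSketch(generationId: Int,
                             isDead: Boolean,
                             assignedPartitions: Map[String, Set[Int]]) // consumerId -> assigned partitions

      def checkCommit(group: Option[GroupSketch],
                      consumerId: String,
                      generationId: Int,
                      partitions: Set[Int]): String =
        group match {
          case None =>
            "store offsets (group unknown to coordinator: not using Kafka for partition management)"
          case Some(g) if g.isDead                                   => "UNKNOWN_CONSUMER_ID"
          case Some(g) if !g.assignedPartitions.contains(consumerId) => "UNKNOWN_CONSUMER_ID"
          case Some(g) if generationId != g.generationId             => "ILLEGAL_GENERATION"
          case Some(g) if !partitions.subsetOf(g.assignedPartitions(consumerId)) =>
            "COMMITTING_PARTITIONS_NOT_ASSIGNED"
          case _ => "store offsets"
        }
    }
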
      diff --git a/core/src/main/scala/kafka/server/OffsetManager.scala b/core/src/main/scala/kafka/server/OffsetManager.scala index 5cca85c..47b6ce9 100755 --- a/core/src/main/scala/kafka/server/OffsetManager.scala +++ b/core/src/main/scala/kafka/server/OffsetManager.scala @@ -17,6 +17,7 @@ package kafka.server +import org.apache.kafka.common.protocol.Errors import org.apache.kafka.common.protocol.types.{Struct, Schema, Field} import org.apache.kafka.common.protocol.types.Type.STRING import org.apache.kafka.common.protocol.types.Type.INT32 @@ -25,19 +26,19 @@ import org.apache.kafka.common.utils.Utils import kafka.utils._ import kafka.common._ -import kafka.log.{FileMessageSet, LogConfig} +import kafka.log.FileMessageSet import kafka.message._ import kafka.metrics.KafkaMetricsGroup import kafka.common.TopicAndPartition import kafka.tools.MessageFormatter import kafka.api.ProducerResponseStatus +import kafka.coordinator.ConsumerCoordinator import scala.Some import scala.collection._ import java.io.PrintStream import java.util.concurrent.atomic.AtomicBoolean import java.nio.ByteBuffer -import java.util.Properties import java.util.concurrent.TimeUnit import com.yammer.metrics.core.Gauge @@ -87,8 +88,7 @@ object OffsetManagerConfig { class OffsetManager(val config: OffsetManagerConfig, replicaManager: ReplicaManager, zkClient: ZkClient, - scheduler: Scheduler, - metadataCache: MetadataCache) extends Logging with KafkaMetricsGroup { + scheduler: Scheduler) extends Logging with KafkaMetricsGroup { /* offsets and metadata cache */ private val offsetsCache = new Pool[GroupTopicPartition, OffsetAndMetadata] @@ -143,9 +143,9 @@ class OffsetManager(val config: OffsetManagerConfig, // Append the tombstone messages to the offset partitions. It is okay if the replicas don't receive these (say, // if we crash or leaders move) since the new leaders will get rid of expired offsets during their own purge cycles. 
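[Editor's note] The block below appends tombstones for expired offsets. As a reminder of the mechanism (a rough sketch, not Kafka's actual message classes), a record on a compacted topic carrying a key and a null payload marks that key for removal at the next compaction pass:

    object TombstoneSketch {
      case class OffsetKey(group: String, topic: String, partition: Int)
      case class Record(key: OffsetKey, payload: Option[Array[Byte]]) {
        def isTombstone: Boolean = payload.isEmpty // absent payload => delete this key on compaction
      }

      def tombstonesFor(expired: Seq[OffsetKey]): Seq[Record] =
        expired.map(key => Record(key, payload = None))
    }
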
tombstonesForPartition.flatMap { case (offsetsPartition, tombstones) => - val partitionOpt = replicaManager.getPartition(OffsetManager.OffsetsTopicName, offsetsPartition) + val partitionOpt = replicaManager.getPartition(ConsumerCoordinator.OffsetsTopicName, offsetsPartition) partitionOpt.map { partition => - val appendPartition = TopicAndPartition(OffsetManager.OffsetsTopicName, offsetsPartition) + val appendPartition = TopicAndPartition(ConsumerCoordinator.OffsetsTopicName, offsetsPartition) val messages = tombstones.map(_._2).toSeq trace("Marked %d offsets in %s for deletion.".format(messages.size, appendPartition)) @@ -170,14 +170,6 @@ class OffsetManager(val config: OffsetManagerConfig, } - def offsetsTopicConfig: Properties = { - val props = new Properties - props.put(LogConfig.SegmentBytesProp, config.offsetsTopicSegmentBytes.toString) - props.put(LogConfig.CleanupPolicyProp, "compact") - props.put(LogConfig.CompressionTypeProp, "uncompressed") - props - } - def partitionFor(group: String): Int = Utils.abs(group.hashCode) % config.offsetsTopicNumPartitions /** @@ -214,22 +206,14 @@ class OffsetManager(val config: OffsetManagerConfig, /** * Store offsets by appending it to the replicated log and then inserting to cache */ - // TODO: generation id and consumer id is needed by coordinator to do consumer checking in the future def storeOffsets(groupId: String, consumerId: String, generationId: Int, offsetMetadata: immutable.Map[TopicAndPartition, OffsetAndMetadata], responseCallback: immutable.Map[TopicAndPartition, Short] => Unit) { - // check if there are any non-existent topics - val nonExistentTopics = offsetMetadata.filter { case (topicAndPartition, offsetMetadata) => - !metadataCache.contains(topicAndPartition.topic) - } - - // first filter out partitions with offset metadata size exceeding limit or - // if its a non existing topic - // TODO: in the future we may want to only support atomic commit and hence fail the whole commit + // first filter out partitions with offset metadata size exceeding limit val filteredOffsetMetadata = offsetMetadata.filter { case (topicAndPartition, offsetAndMetadata) => - validateOffsetMetadataLength(offsetAndMetadata.metadata) || nonExistentTopics.contains(topicAndPartition) + validateOffsetMetadataLength(offsetAndMetadata.metadata) } // construct the message set to append @@ -240,7 +224,7 @@ class OffsetManager(val config: OffsetManagerConfig, ) }.toSeq - val offsetTopicPartition = TopicAndPartition(OffsetManager.OffsetsTopicName, partitionFor(groupId)) + val offsetTopicPartition = TopicAndPartition(ConsumerCoordinator.OffsetsTopicName, partitionFor(groupId)) val offsetsAndMetadataMessageSet = Map(offsetTopicPartition -> new ByteBufferMessageSet(config.offsetsTopicCompressionCodec, messages:_*)) @@ -271,6 +255,10 @@ class OffsetManager(val config: OffsetManagerConfig, ErrorMapping.ConsumerCoordinatorNotAvailableCode else if (status.error == ErrorMapping.NotLeaderForPartitionCode) ErrorMapping.NotCoordinatorForConsumerCode + else if (status.error == ErrorMapping.MessageSizeTooLargeCode + || status.error == ErrorMapping.MessageSetSizeTooLargeCode + || status.error == ErrorMapping.InvalidFetchSizeCode) + Errors.INVALID_COMMIT_OFFSET_SIZE.code else status.error } @@ -278,9 +266,7 @@ class OffsetManager(val config: OffsetManagerConfig, // compute the final error codes for the commit response val commitStatus = offsetMetadata.map { case (topicAndPartition, offsetAndMetadata) => - if (nonExistentTopics.contains(topicAndPartition)) - (topicAndPartition, 
ErrorMapping.UnknownTopicOrPartitionCode) - else if (validateOffsetMetadataLength(offsetAndMetadata.metadata)) + if (validateOffsetMetadataLength(offsetAndMetadata.metadata)) (topicAndPartition, responseCode) else (topicAndPartition, ErrorMapping.OffsetMetadataTooLargeCode) @@ -338,7 +324,7 @@ class OffsetManager(val config: OffsetManagerConfig, debug("Could not fetch offsets for group %s (not offset coordinator).".format(group)) topicPartitions.map { topicAndPartition => val groupTopicPartition = GroupTopicPartition(group, topicAndPartition) - (groupTopicPartition.topicPartition, OffsetMetadataAndError.NotOffsetManagerForGroup) + (groupTopicPartition.topicPartition, OffsetMetadataAndError.NotCoordinatorForGroup) }.toMap } } @@ -349,7 +335,7 @@ class OffsetManager(val config: OffsetManagerConfig, */ def loadOffsetsFromLog(offsetsPartition: Int) { - val topicPartition = TopicAndPartition(OffsetManager.OffsetsTopicName, offsetsPartition) + val topicPartition = TopicAndPartition(ConsumerCoordinator.OffsetsTopicName, offsetsPartition) loadingPartitions synchronized { if (loadingPartitions.contains(offsetsPartition)) { @@ -421,7 +407,7 @@ class OffsetManager(val config: OffsetManagerConfig, } private def getHighWatermark(partitionId: Int): Long = { - val partitionOpt = replicaManager.getPartition(OffsetManager.OffsetsTopicName, partitionId) + val partitionOpt = replicaManager.getPartition(ConsumerCoordinator.OffsetsTopicName, partitionId) val hw = partitionOpt.map { partition => partition.leaderReplicaIfLocal().map(_.highWatermark.messageOffset).getOrElse(-1L) @@ -449,7 +435,7 @@ class OffsetManager(val config: OffsetManagerConfig, } if (numRemoved > 0) info("Removed %d cached offsets for %s on follower transition." - .format(numRemoved, TopicAndPartition(OffsetManager.OffsetsTopicName, offsetsPartition))) + .format(numRemoved, TopicAndPartition(ConsumerCoordinator.OffsetsTopicName, offsetsPartition))) } @@ -461,8 +447,6 @@ class OffsetManager(val config: OffsetManagerConfig, object OffsetManager { - val OffsetsTopicName = "__consumer_offsets" - private case class KeyAndValueSchemas(keySchema: Schema, valueSchema: Schema) private val CURRENT_OFFSET_SCHEMA_VERSION = 1.toShort diff --git a/core/src/main/scala/kafka/server/ReplicaManager.scala b/core/src/main/scala/kafka/server/ReplicaManager.scala index 59c9bc3..795220e 100644 --- a/core/src/main/scala/kafka/server/ReplicaManager.scala +++ b/core/src/main/scala/kafka/server/ReplicaManager.scala @@ -23,19 +23,19 @@ import kafka.cluster.{BrokerEndPoint, Partition, Replica} import kafka.log.{LogAppendInfo, LogManager} import kafka.metrics.KafkaMetricsGroup import kafka.controller.KafkaController -import kafka.common.TopicAndPartition import kafka.message.{ByteBufferMessageSet, MessageSet} +import kafka.api.ProducerResponseStatus +import kafka.common.TopicAndPartition +import kafka.api.PartitionFetchInfo + +import org.apache.kafka.common.protocol.Errors import java.util.concurrent.atomic.AtomicBoolean import java.io.{IOException, File} import java.util.concurrent.TimeUnit -import org.apache.kafka.common.protocol.Errors -import scala.Predef._ +import scala.Some import scala.collection._ -import scala.collection.mutable.HashMap -import scala.collection.Map -import scala.collection.Set import org.I0Itec.zkclient.ZkClient import com.yammer.metrics.core.Gauge @@ -84,6 +84,17 @@ object LogReadResult { false) } +case class BecomeLeaderOrFollowerResult(responseMap: collection.Map[(String, Int), Short], + updatedLeaders: Set[Partition], + updatedFollowers: 
Set[Partition], + errorCode: Short) { + + override def toString = { + "updated leaders: [%s], updated followers: [%s], update results: [%s], global error: [%d]" + .format(updatedLeaders, updatedFollowers, responseMap, errorCode) + } +} + object ReplicaManager { val HighWatermarkFilename = "replication-offset-checkpoint" } @@ -393,10 +404,10 @@ class ReplicaManager(val config: KafkaConfig, (topicAndPartition, LogAppendResult(LogAppendInfo.UnknownLogAppendInfo, Some(utpe))) case nle: NotLeaderForPartitionException => (topicAndPartition, LogAppendResult(LogAppendInfo.UnknownLogAppendInfo, Some(nle))) - case mtl: MessageSizeTooLargeException => - (topicAndPartition, LogAppendResult(LogAppendInfo.UnknownLogAppendInfo, Some(mtl))) - case mstl: MessageSetSizeTooLargeException => - (topicAndPartition, LogAppendResult(LogAppendInfo.UnknownLogAppendInfo, Some(mstl))) + case mtle: MessageSizeTooLargeException => + (topicAndPartition, LogAppendResult(LogAppendInfo.UnknownLogAppendInfo, Some(mtle))) + case mstle: MessageSetSizeTooLargeException => + (topicAndPartition, LogAppendResult(LogAppendInfo.UnknownLogAppendInfo, Some(mstle))) case imse : InvalidMessageSizeException => (topicAndPartition, LogAppendResult(LogAppendInfo.UnknownLogAppendInfo, Some(imse))) case t: Throwable => @@ -416,7 +427,7 @@ class ReplicaManager(val config: KafkaConfig, def fetchMessages(timeout: Long, replicaId: Int, fetchMinBytes: Int, - fetchInfo: Map[TopicAndPartition, PartitionFetchInfo], + fetchInfo: immutable.Map[TopicAndPartition, PartitionFetchInfo], responseCallback: Map[TopicAndPartition, FetchResponsePartitionData] => Unit) { val isFromFollower = replicaId >= 0 @@ -544,30 +555,29 @@ class ReplicaManager(val config: KafkaConfig, } } - def becomeLeaderOrFollower(leaderAndISRRequest: LeaderAndIsrRequest, - offsetManager: OffsetManager): (collection.Map[(String, Int), Short], Short) = { + def becomeLeaderOrFollower(leaderAndISRRequest: LeaderAndIsrRequest): BecomeLeaderOrFollowerResult = { leaderAndISRRequest.partitionStateInfos.foreach { case ((topic, partition), stateInfo) => stateChangeLogger.trace("Broker %d received LeaderAndIsr request %s correlation id %d from controller %d epoch %d for partition [%s,%d]" .format(localBrokerId, stateInfo, leaderAndISRRequest.correlationId, leaderAndISRRequest.controllerId, leaderAndISRRequest.controllerEpoch, topic, partition)) } replicaStateChangeLock synchronized { - val responseMap = new collection.mutable.HashMap[(String, Int), Short] - if(leaderAndISRRequest.controllerEpoch < controllerEpoch) { + val responseMap = new mutable.HashMap[(String, Int), Short] + if (leaderAndISRRequest.controllerEpoch < controllerEpoch) { leaderAndISRRequest.partitionStateInfos.foreach { case ((topic, partition), stateInfo) => stateChangeLogger.warn(("Broker %d ignoring LeaderAndIsr request from controller %d with correlation id %d since " + "its controller epoch %d is old. 
Latest known controller epoch is %d").format(localBrokerId, leaderAndISRRequest.controllerId, leaderAndISRRequest.correlationId, leaderAndISRRequest.controllerEpoch, controllerEpoch)) } - (responseMap, ErrorMapping.StaleControllerEpochCode) + BecomeLeaderOrFollowerResult(responseMap, Set.empty[Partition], Set.empty[Partition], ErrorMapping.StaleControllerEpochCode) } else { val controllerId = leaderAndISRRequest.controllerId val correlationId = leaderAndISRRequest.correlationId controllerEpoch = leaderAndISRRequest.controllerEpoch // First check partition's leader epoch - val partitionState = new HashMap[Partition, PartitionStateInfo]() - leaderAndISRRequest.partitionStateInfos.foreach{ case ((topic, partitionId), partitionStateInfo) => + val partitionState = new mutable.HashMap[Partition, PartitionStateInfo]() + leaderAndISRRequest.partitionStateInfos.foreach { case ((topic, partitionId), partitionStateInfo) => val partition = getOrCreatePartition(topic, partitionId) val partitionLeaderEpoch = partition.getLeaderEpoch() // If the leader epoch is valid record the epoch of the controller that made the leadership decision. @@ -591,14 +601,19 @@ class ReplicaManager(val config: KafkaConfig, } } - val partitionsTobeLeader = partitionState - .filter{ case (partition, partitionStateInfo) => partitionStateInfo.leaderIsrAndControllerEpoch.leaderAndIsr.leader == config.brokerId} + val partitionsTobeLeader = partitionState.filter { case (partition, partitionStateInfo) => + partitionStateInfo.leaderIsrAndControllerEpoch.leaderAndIsr.leader == config.brokerId + } val partitionsToBeFollower = (partitionState -- partitionsTobeLeader.keys) - if (!partitionsTobeLeader.isEmpty) - makeLeaders(controllerId, controllerEpoch, partitionsTobeLeader, leaderAndISRRequest.correlationId, responseMap, offsetManager) - if (!partitionsToBeFollower.isEmpty) - makeFollowers(controllerId, controllerEpoch, partitionsToBeFollower, leaderAndISRRequest.leaders, leaderAndISRRequest.correlationId, responseMap, offsetManager) + val partitionsBecomeLeader = if (!partitionsTobeLeader.isEmpty) + makeLeaders(controllerId, controllerEpoch, partitionsTobeLeader, leaderAndISRRequest.correlationId, responseMap) + else + Set.empty[Partition] + val partitionsBecomeFollower = if (!partitionsToBeFollower.isEmpty) + makeFollowers(controllerId, controllerEpoch, partitionsToBeFollower, leaderAndISRRequest.leaders, leaderAndISRRequest.correlationId, responseMap) + else + Set.empty[Partition] // we initialize highwatermark thread after the first leaderisrrequest. 
This ensures that all the partitions // have been completely populated before starting the checkpointing there by avoiding weird race conditions @@ -607,7 +622,7 @@ class ReplicaManager(val config: KafkaConfig, hwThreadInitialized = true } replicaFetcherManager.shutdownIdleFetcherThreads() - (responseMap, ErrorMapping.NoError) + BecomeLeaderOrFollowerResult(responseMap, partitionsBecomeLeader, partitionsBecomeFollower, ErrorMapping.NoError) } } } @@ -623,10 +638,11 @@ class ReplicaManager(val config: KafkaConfig, * the error message will be set on each partition since we do not know which partition caused it * TODO: the above may need to be fixed later */ - private def makeLeaders(controllerId: Int, epoch: Int, + private def makeLeaders(controllerId: Int, + epoch: Int, partitionState: Map[Partition, PartitionStateInfo], - correlationId: Int, responseMap: mutable.Map[(String, Int), Short], - offsetManager: OffsetManager) = { + correlationId: Int, + responseMap: mutable.Map[(String, Int), Short]): Set[Partition] = { partitionState.foreach(state => stateChangeLogger.trace(("Broker %d handling LeaderAndIsr request correlationId %d from controller %d epoch %d " + "starting the become-leader transition for partition %s") @@ -645,7 +661,7 @@ class ReplicaManager(val config: KafkaConfig, } // Update the partition information to be the leader partitionState.foreach{ case (partition, partitionStateInfo) => - partition.makeLeader(controllerId, partitionStateInfo, correlationId, offsetManager)} + partition.makeLeader(controllerId, partitionStateInfo, correlationId)} } catch { case e: Throwable => @@ -664,6 +680,8 @@ class ReplicaManager(val config: KafkaConfig, "for the become-leader transition for partition %s") .format(localBrokerId, correlationId, controllerId, epoch, TopicAndPartition(state._1.topic, state._1.partitionId))) } + + partitionState.keySet } /* @@ -682,9 +700,12 @@ class ReplicaManager(val config: KafkaConfig, * If an unexpected error is thrown in this function, it will be propagated to KafkaApis where * the error message will be set on each partition since we do not know which partition caused it */ - private def makeFollowers(controllerId: Int, epoch: Int, partitionState: Map[Partition, PartitionStateInfo], - leaders: Set[BrokerEndPoint], correlationId: Int, responseMap: mutable.Map[(String, Int), Short], - offsetManager: OffsetManager) { + private def makeFollowers(controllerId: Int, + epoch: Int, + partitionState: Map[Partition, PartitionStateInfo], + leaders: Set[BrokerEndPoint], + correlationId: Int, + responseMap: mutable.Map[(String, Int), Short]) : Set[Partition] = { partitionState.foreach { state => stateChangeLogger.trace(("Broker %d handling LeaderAndIsr request correlationId %d from controller %d epoch %d " + "starting the become-follower transition for partition %s") @@ -694,18 +715,18 @@ class ReplicaManager(val config: KafkaConfig, for (partition <- partitionState.keys) responseMap.put((partition.topic, partition.partitionId), ErrorMapping.NoError) - try { + val partitionsToMakeFollower: mutable.Set[Partition] = mutable.Set() - var partitionsToMakeFollower: Set[Partition] = Set() + try { - // TODO: Delete leaders from LeaderAndIsrRequest in 0.8.1 + // TODO: Delete leaders from LeaderAndIsrRequest partitionState.foreach{ case (partition, partitionStateInfo) => val leaderIsrAndControllerEpoch = partitionStateInfo.leaderIsrAndControllerEpoch val newLeaderBrokerId = leaderIsrAndControllerEpoch.leaderAndIsr.leader leaders.find(_.id == newLeaderBrokerId) match { // Only 
change partition state when the leader is available case Some(leaderBroker) => - if (partition.makeFollower(controllerId, partitionStateInfo, correlationId, offsetManager)) + if (partition.makeFollower(controllerId, partitionStateInfo, correlationId)) partitionsToMakeFollower += partition else stateChangeLogger.info(("Broker %d skipped the become-follower state change after marking its partition as follower with correlation id %d from " + @@ -775,6 +796,8 @@ class ReplicaManager(val config: KafkaConfig, "for the become-follower transition for partition %s") .format(localBrokerId, correlationId, controllerId, epoch, TopicAndPartition(state._1.topic, state._1.partitionId))) } + + partitionsToMakeFollower } private def maybeShrinkIsr(): Unit = { diff --git a/core/src/main/scala/kafka/tools/MirrorMaker.scala b/core/src/main/scala/kafka/tools/MirrorMaker.scala index 459aaec..797b4bb 100755 --- a/core/src/main/scala/kafka/tools/MirrorMaker.scala +++ b/core/src/main/scala/kafka/tools/MirrorMaker.scala @@ -131,9 +131,9 @@ object MirrorMaker extends Logging with KafkaMetricsGroup { .ofType(classOf[String]) val messageHandlerOpt = parser.accepts("message.handler", - "The consumer rebalance listener to use for mirror maker consumer.") + "Message handler which will process every record in-between consumer and producer.") .withRequiredArg() - .describedAs("A custom rebalance listener of type MirrorMakerMessageHandler") + .describedAs("A custom message handler of type MirrorMakerMessageHandler") .ofType(classOf[String]) val messageHandlerArgsOpt = parser.accepts("message.handler.args", diff --git a/core/src/main/scala/kafka/utils/ReplicationUtils.scala b/core/src/main/scala/kafka/utils/ReplicationUtils.scala index 6068733..783ba10 100644 --- a/core/src/main/scala/kafka/utils/ReplicationUtils.scala +++ b/core/src/main/scala/kafka/utils/ReplicationUtils.scala @@ -18,22 +18,32 @@ package kafka.utils import kafka.api.LeaderAndIsr +import kafka.common.TopicAndPartition import kafka.controller.LeaderIsrAndControllerEpoch -import org.apache.zookeeper.data.Stat import org.I0Itec.zkclient.ZkClient +import org.apache.zookeeper.data.Stat -import scala.Some import scala.collection._ object ReplicationUtils extends Logging { + val IsrChangeNotificationPrefix = "isr_change_" + def updateLeaderAndIsr(zkClient: ZkClient, topic: String, partitionId: Int, newLeaderAndIsr: LeaderAndIsr, controllerEpoch: Int, zkVersion: Int): (Boolean,Int) = { debug("Updated ISR for partition [%s,%d] to %s".format(topic, partitionId, newLeaderAndIsr.isr.mkString(","))) val path = ZkUtils.getTopicPartitionLeaderAndIsrPath(topic, partitionId) val newLeaderData = ZkUtils.leaderAndIsrZkData(newLeaderAndIsr, controllerEpoch) // use the epoch of the controller that made the leadership decision, instead of the current controller epoch - ZkUtils.conditionalUpdatePersistentPath(zkClient, path, newLeaderData, zkVersion, Some(checkLeaderAndIsrZkData)) + val updatePersistentPath: (Boolean, Int) = ZkUtils.conditionalUpdatePersistentPath(zkClient, path, newLeaderData, zkVersion, Some(checkLeaderAndIsrZkData)) + if (updatePersistentPath._1) { + val topicAndPartition: TopicAndPartition = TopicAndPartition(topic, partitionId) + val isrChangeNotificationPath: String = ZkUtils.createSequentialPersistentPath( + zkClient, ZkUtils.IsrChangeNotificationPath + "/" + IsrChangeNotificationPrefix, + topicAndPartition.toJson) + debug("Added " + isrChangeNotificationPath + " for " + topicAndPartition) + } + updatePersistentPath } def checkLeaderAndIsrZkData(zkClient: 
ZkClient, path: String, expectedLeaderAndIsrInfo: String): (Boolean,Int) = { diff --git a/core/src/main/scala/kafka/utils/ZkUtils.scala b/core/src/main/scala/kafka/utils/ZkUtils.scala index 78475e3..166814c 100644 --- a/core/src/main/scala/kafka/utils/ZkUtils.scala +++ b/core/src/main/scala/kafka/utils/ZkUtils.scala @@ -47,6 +47,7 @@ object ZkUtils extends Logging { val DeleteTopicsPath = "/admin/delete_topics" val PreferredReplicaLeaderElectionPath = "/admin/preferred_replica_election" val BrokerSequenceIdPath = "/brokers/seqid" + val IsrChangeNotificationPath = "/isr_change_notification" def getTopicPath(topic: String): String = { BrokerTopicsPath + "/" + topic diff --git a/core/src/test/scala/integration/kafka/api/ConsumerBounceTest.scala b/core/src/test/scala/integration/kafka/api/ConsumerBounceTest.scala index f56096b..b0750fa 100644 --- a/core/src/test/scala/integration/kafka/api/ConsumerBounceTest.scala +++ b/core/src/test/scala/integration/kafka/api/ConsumerBounceTest.scala @@ -14,14 +14,10 @@ package kafka.api import kafka.server.KafkaConfig -import org.apache.kafka.clients.producer.ProducerConfig -import org.apache.kafka.clients.producer.ProducerRecord -import org.apache.kafka.clients.consumer.ConsumerConfig -import org.apache.kafka.clients.consumer.CommitType +import kafka.utils.{Logging, ShutdownableThread, TestUtils} +import org.apache.kafka.clients.consumer._ +import org.apache.kafka.clients.producer.{ProducerConfig, ProducerRecord} import org.apache.kafka.common.TopicPartition - -import kafka.utils.{ShutdownableThread, TestUtils, Logging} - import org.junit.Assert._ import scala.collection.JavaConversions._ @@ -85,9 +81,11 @@ class ConsumerBounceTest extends IntegrationTestHarness with Logging { assertEquals(consumed.toLong, record.offset()) consumed += 1 } + consumer.commit(CommitType.SYNC) + assertEquals(consumer.position(tp), consumer.committed(tp)) - if (consumed == numRecords) { + if (consumer.position(tp) == numRecords) { consumer.seekToBeginning() consumed = 0 } diff --git a/core/src/test/scala/integration/kafka/api/ConsumerTest.scala b/core/src/test/scala/integration/kafka/api/ConsumerTest.scala index 17b17b9..92ffb91 100644 --- a/core/src/test/scala/integration/kafka/api/ConsumerTest.scala +++ b/core/src/test/scala/integration/kafka/api/ConsumerTest.scala @@ -25,12 +25,13 @@ import org.apache.kafka.common.TopicPartition import org.apache.kafka.clients.consumer.NoOffsetForPartitionException import kafka.utils.{TestUtils, Logging} -import kafka.server.{KafkaConfig, OffsetManager} +import kafka.server.KafkaConfig import java.util.ArrayList import org.junit.Assert._ import scala.collection.JavaConversions._ +import kafka.coordinator.ConsumerCoordinator /** @@ -158,9 +159,9 @@ class ConsumerTest extends IntegrationTestHarness with Logging { consumer0.poll(50) // get metadata for the topic - var parts = consumer0.partitionsFor(OffsetManager.OffsetsTopicName) + var parts = consumer0.partitionsFor(ConsumerCoordinator.OffsetsTopicName) while(parts == null) - parts = consumer0.partitionsFor(OffsetManager.OffsetsTopicName) + parts = consumer0.partitionsFor(ConsumerCoordinator.OffsetsTopicName) assertEquals(1, parts.size) assertNotNull(parts(0).leader()) diff --git a/core/src/test/scala/integration/kafka/api/IntegrationTestHarness.scala b/core/src/test/scala/integration/kafka/api/IntegrationTestHarness.scala index 07b1ff4..afcc349 100644 --- a/core/src/test/scala/integration/kafka/api/IntegrationTestHarness.scala +++ 
b/core/src/test/scala/integration/kafka/api/IntegrationTestHarness.scala @@ -26,6 +26,7 @@ import org.apache.kafka.clients.producer.KafkaProducer import kafka.server.{OffsetManager, KafkaConfig} import kafka.integration.KafkaServerTestHarness import scala.collection.mutable.Buffer +import kafka.coordinator.ConsumerCoordinator /** * A helper class for writing integration tests that involve producers, consumers, and servers @@ -63,11 +64,11 @@ trait IntegrationTestHarness extends KafkaServerTestHarness { consumers += new KafkaConsumer(consumerConfig) // create the consumer offset topic - TestUtils.createTopic(zkClient, OffsetManager.OffsetsTopicName, - serverConfig.getProperty("offsets.topic.num.partitions").toInt, - serverConfig.getProperty("offsets.topic.replication.factor").toInt, + TestUtils.createTopic(zkClient, ConsumerCoordinator.OffsetsTopicName, + serverConfig.getProperty(KafkaConfig.OffsetsTopicPartitionsProp).toInt, + serverConfig.getProperty(KafkaConfig.OffsetsTopicReplicationFactorProp).toInt, servers, - servers(0).offsetManager.offsetsTopicConfig) + servers(0).consumerCoordinator.offsetsTopicConfigs) } override def tearDown() { diff --git a/core/src/test/scala/other/kafka/TestOffsetManager.scala b/core/src/test/scala/other/kafka/TestOffsetManager.scala index 8047da4..e0e46c8 100644 --- a/core/src/test/scala/other/kafka/TestOffsetManager.scala +++ b/core/src/test/scala/other/kafka/TestOffsetManager.scala @@ -1,3 +1,20 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + package other.kafka import org.I0Itec.zkclient.ZkClient diff --git a/core/src/test/scala/unit/kafka/admin/TopicCommandTest.scala b/core/src/test/scala/unit/kafka/admin/TopicCommandTest.scala index c7136f2..dcd6988 100644 --- a/core/src/test/scala/unit/kafka/admin/TopicCommandTest.scala +++ b/core/src/test/scala/unit/kafka/admin/TopicCommandTest.scala @@ -22,9 +22,9 @@ import org.scalatest.junit.JUnit3Suite import kafka.utils.Logging import kafka.utils.TestUtils import kafka.zk.ZooKeeperTestHarness -import kafka.server.{OffsetManager, KafkaConfig} import kafka.admin.TopicCommand.TopicCommandOptions import kafka.utils.ZkUtils +import kafka.coordinator.ConsumerCoordinator class TopicCommandTest extends JUnit3Suite with ZooKeeperTestHarness with Logging { @@ -87,12 +87,12 @@ class TopicCommandTest extends JUnit3Suite with ZooKeeperTestHarness with Loggin // create the offset topic val createOffsetTopicOpts = new TopicCommandOptions(Array("--partitions", numPartitionsOriginal.toString, "--replication-factor", "1", - "--topic", OffsetManager.OffsetsTopicName)) + "--topic", ConsumerCoordinator.OffsetsTopicName)) TopicCommand.createTopic(zkClient, createOffsetTopicOpts) // try to delete the OffsetManager.OffsetsTopicName and make sure it doesn't - val deleteOffsetTopicOpts = new TopicCommandOptions(Array("--topic", OffsetManager.OffsetsTopicName)) - val deleteOffsetTopicPath = ZkUtils.getDeleteTopicPath(OffsetManager.OffsetsTopicName) + val deleteOffsetTopicOpts = new TopicCommandOptions(Array("--topic", ConsumerCoordinator.OffsetsTopicName)) + val deleteOffsetTopicPath = ZkUtils.getDeleteTopicPath(ConsumerCoordinator.OffsetsTopicName) assertFalse("Delete path for topic shouldn't exist before deletion.", zkClient.exists(deleteOffsetTopicPath)) intercept[AdminOperationException] { TopicCommand.deleteTopic(zkClient, deleteOffsetTopicOpts) diff --git a/core/src/test/scala/unit/kafka/consumer/TopicFilterTest.scala b/core/src/test/scala/unit/kafka/consumer/TopicFilterTest.scala index 4f124af..4b326d0 100644 --- a/core/src/test/scala/unit/kafka/consumer/TopicFilterTest.scala +++ b/core/src/test/scala/unit/kafka/consumer/TopicFilterTest.scala @@ -22,6 +22,7 @@ import junit.framework.Assert._ import org.scalatest.junit.JUnitSuite import org.junit.Test import kafka.server.OffsetManager +import kafka.coordinator.ConsumerCoordinator class TopicFilterTest extends JUnitSuite { @@ -37,8 +38,8 @@ class TopicFilterTest extends JUnitSuite { val topicFilter2 = new Whitelist(".+") assertTrue(topicFilter2.isTopicAllowed("alltopics", excludeInternalTopics = true)) - assertFalse(topicFilter2.isTopicAllowed(OffsetManager.OffsetsTopicName, excludeInternalTopics = true)) - assertTrue(topicFilter2.isTopicAllowed(OffsetManager.OffsetsTopicName, excludeInternalTopics = false)) + assertFalse(topicFilter2.isTopicAllowed(ConsumerCoordinator.OffsetsTopicName, excludeInternalTopics = true)) + assertTrue(topicFilter2.isTopicAllowed(ConsumerCoordinator.OffsetsTopicName, excludeInternalTopics = false)) val topicFilter3 = new Whitelist("white_listed-topic.+") assertTrue(topicFilter3.isTopicAllowed("white_listed-topic1", excludeInternalTopics = true)) @@ -57,8 +58,8 @@ class TopicFilterTest extends JUnitSuite { assertFalse(topicFilter1.isTopicAllowed("black1", excludeInternalTopics = true)) assertFalse(topicFilter1.isTopicAllowed("black1", excludeInternalTopics = false)) - assertFalse(topicFilter1.isTopicAllowed(OffsetManager.OffsetsTopicName, excludeInternalTopics = true)) - 
assertTrue(topicFilter1.isTopicAllowed(OffsetManager.OffsetsTopicName, excludeInternalTopics = false)) + assertFalse(topicFilter1.isTopicAllowed(ConsumerCoordinator.OffsetsTopicName, excludeInternalTopics = true)) + assertTrue(topicFilter1.isTopicAllowed(ConsumerCoordinator.OffsetsTopicName, excludeInternalTopics = false)) } @Test diff --git a/core/src/test/scala/unit/kafka/coordinator/ConsumerCoordinatorResponseTest.scala b/core/src/test/scala/unit/kafka/coordinator/ConsumerCoordinatorResponseTest.scala new file mode 100644 index 0000000..87a5330 --- /dev/null +++ b/core/src/test/scala/unit/kafka/coordinator/ConsumerCoordinatorResponseTest.scala @@ -0,0 +1,327 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package kafka.coordinator + + +import java.util.concurrent.TimeUnit + +import junit.framework.Assert._ +import kafka.common.TopicAndPartition +import kafka.server.{OffsetManager, ReplicaManager, KafkaConfig} +import kafka.utils.{KafkaScheduler, TestUtils} +import org.apache.kafka.common.protocol.Errors +import org.apache.kafka.common.requests.JoinGroupRequest +import org.easymock.EasyMock +import org.junit.{After, Before, Test} +import org.scalatest.junit.JUnitSuite + +import scala.concurrent.duration.Duration +import scala.concurrent.{Await, Future, Promise} + +/** + * Test ConsumerCoordinator responses + */ +class ConsumerCoordinatorResponseTest extends JUnitSuite { + type JoinGroupCallbackParams = (Set[TopicAndPartition], String, Int, Short) + type JoinGroupCallback = (Set[TopicAndPartition], String, Int, Short) => Unit + type HeartbeatCallbackParams = Short + type HeartbeatCallback = Short => Unit + + val ConsumerMinSessionTimeout = 10 + val ConsumerMaxSessionTimeout = 100 + val DefaultSessionTimeout = 20 + var consumerCoordinator: ConsumerCoordinator = null + var offsetManager : OffsetManager = null + + @Before + def setUp() { + val props = TestUtils.createBrokerConfig(nodeId = 0, zkConnect = "") + props.setProperty(KafkaConfig.ConsumerMinSessionTimeoutMsProp, ConsumerMinSessionTimeout.toString) + props.setProperty(KafkaConfig.ConsumerMaxSessionTimeoutMsProp, ConsumerMaxSessionTimeout.toString) + offsetManager = EasyMock.createStrictMock(classOf[OffsetManager]) + consumerCoordinator = ConsumerCoordinator.create(KafkaConfig.fromProps(props), null, offsetManager) + consumerCoordinator.startup() + } + + @After + def tearDown() { + EasyMock.reset(offsetManager) + consumerCoordinator.shutdown() + } + + @Test + def testJoinGroupWrongCoordinator() { + val groupId = "groupId" + val consumerId = JoinGroupRequest.UNKNOWN_CONSUMER_ID + val partitionAssignmentStrategy = "range" + + val joinGroupResult = joinGroup(groupId, consumerId, partitionAssignmentStrategy, DefaultSessionTimeout, isCoordinatorForGroup = false) + val 
joinGroupErrorCode = joinGroupResult._4 + assertEquals(Errors.NOT_COORDINATOR_FOR_CONSUMER.code, joinGroupErrorCode) + } + + @Test + def testJoinGroupUnknownPartitionAssignmentStrategy() { + val groupId = "groupId" + val consumerId = JoinGroupRequest.UNKNOWN_CONSUMER_ID + val partitionAssignmentStrategy = "foo" + + val joinGroupResult = joinGroup(groupId, consumerId, partitionAssignmentStrategy, DefaultSessionTimeout, isCoordinatorForGroup = true) + val joinGroupErrorCode = joinGroupResult._4 + assertEquals(Errors.UNKNOWN_PARTITION_ASSIGNMENT_STRATEGY.code, joinGroupErrorCode) + } + + @Test + def testJoinGroupSessionTimeoutTooSmall() { + val groupId = "groupId" + val consumerId = JoinGroupRequest.UNKNOWN_CONSUMER_ID + val partitionAssignmentStrategy = "range" + + val joinGroupResult = joinGroup(groupId, consumerId, partitionAssignmentStrategy, ConsumerMinSessionTimeout - 1, isCoordinatorForGroup = true) + val joinGroupErrorCode = joinGroupResult._4 + assertEquals(Errors.INVALID_SESSION_TIMEOUT.code, joinGroupErrorCode) + } + + @Test + def testJoinGroupSessionTimeoutTooLarge() { + val groupId = "groupId" + val consumerId = JoinGroupRequest.UNKNOWN_CONSUMER_ID + val partitionAssignmentStrategy = "range" + + val joinGroupResult = joinGroup(groupId, consumerId, partitionAssignmentStrategy, ConsumerMaxSessionTimeout + 1, isCoordinatorForGroup = true) + val joinGroupErrorCode = joinGroupResult._4 + assertEquals(Errors.INVALID_SESSION_TIMEOUT.code, joinGroupErrorCode) + } + + @Test + def testJoinGroupUnknownConsumerNewGroup() { + val groupId = "groupId" + val consumerId = "consumerId" + val partitionAssignmentStrategy = "range" + + val joinGroupResult = joinGroup(groupId, consumerId, partitionAssignmentStrategy, DefaultSessionTimeout, isCoordinatorForGroup = true) + val joinGroupErrorCode = joinGroupResult._4 + assertEquals(Errors.UNKNOWN_CONSUMER_ID.code, joinGroupErrorCode) + } + + @Test + def testValidJoinGroup() { + val groupId = "groupId" + val consumerId = JoinGroupRequest.UNKNOWN_CONSUMER_ID + val partitionAssignmentStrategy = "range" + + val joinGroupResult = joinGroup(groupId, consumerId, partitionAssignmentStrategy, DefaultSessionTimeout, isCoordinatorForGroup = true) + val joinGroupErrorCode = joinGroupResult._4 + assertEquals(Errors.NONE.code, joinGroupErrorCode) + } + + @Test + def testJoinGroupInconsistentPartitionAssignmentStrategy() { + val groupId = "groupId" + val consumerId = JoinGroupRequest.UNKNOWN_CONSUMER_ID + val otherConsumerId = JoinGroupRequest.UNKNOWN_CONSUMER_ID + val partitionAssignmentStrategy = "range" + val otherPartitionAssignmentStrategy = "roundrobin" + + val joinGroupResult = joinGroup(groupId, consumerId, partitionAssignmentStrategy, DefaultSessionTimeout, isCoordinatorForGroup = true) + val joinGroupErrorCode = joinGroupResult._4 + assertEquals(Errors.NONE.code, joinGroupErrorCode) + + EasyMock.reset(offsetManager) + val otherJoinGroupResult = joinGroup(groupId, otherConsumerId, otherPartitionAssignmentStrategy, DefaultSessionTimeout, isCoordinatorForGroup = true) + val otherJoinGroupErrorCode = otherJoinGroupResult._4 + assertEquals(Errors.INCONSISTENT_PARTITION_ASSIGNMENT_STRATEGY.code, otherJoinGroupErrorCode) + } + + @Test + def testJoinGroupUnknownConsumerExistingGroup() { + val groupId = "groupId" + val consumerId = JoinGroupRequest.UNKNOWN_CONSUMER_ID + val otherConsumerId = "consumerId" + val partitionAssignmentStrategy = "range" + + val joinGroupResult = joinGroup(groupId, consumerId, partitionAssignmentStrategy, DefaultSessionTimeout, 
isCoordinatorForGroup = true) + val joinGroupErrorCode = joinGroupResult._4 + assertEquals(Errors.NONE.code, joinGroupErrorCode) + + EasyMock.reset(offsetManager) + val otherJoinGroupResult = joinGroup(groupId, otherConsumerId, partitionAssignmentStrategy, DefaultSessionTimeout, isCoordinatorForGroup = true) + val otherJoinGroupErrorCode = otherJoinGroupResult._4 + assertEquals(Errors.UNKNOWN_CONSUMER_ID.code, otherJoinGroupErrorCode) + } + + @Test + def testHeartbeatWrongCoordinator() { + val groupId = "groupId" + val consumerId = "consumerId" + + val heartbeatResult = heartbeat(groupId, consumerId, -1, isCoordinatorForGroup = false) + assertEquals(Errors.NOT_COORDINATOR_FOR_CONSUMER.code, heartbeatResult) + } + + @Test + def testHeartbeatUnknownGroup() { + val groupId = "groupId" + val consumerId = "consumerId" + + val heartbeatResult = heartbeat(groupId, consumerId, -1, isCoordinatorForGroup = true) + assertEquals(Errors.UNKNOWN_CONSUMER_ID.code, heartbeatResult) + } + + @Test + def testHeartbeatUnknownConsumerExistingGroup() { + val groupId = "groupId" + val consumerId = JoinGroupRequest.UNKNOWN_CONSUMER_ID + val otherConsumerId = "consumerId" + val partitionAssignmentStrategy = "range" + + val joinGroupResult = joinGroup(groupId, consumerId, partitionAssignmentStrategy, DefaultSessionTimeout, isCoordinatorForGroup = true) + val joinGroupErrorCode = joinGroupResult._4 + assertEquals(Errors.NONE.code, joinGroupErrorCode) + + EasyMock.reset(offsetManager) + val heartbeatResult = heartbeat(groupId, otherConsumerId, 1, isCoordinatorForGroup = true) + assertEquals(Errors.UNKNOWN_CONSUMER_ID.code, heartbeatResult) + } + + @Test + def testHeartbeatIllegalGeneration() { + val groupId = "groupId" + val consumerId = JoinGroupRequest.UNKNOWN_CONSUMER_ID + val partitionAssignmentStrategy = "range" + + val joinGroupResult = joinGroup(groupId, consumerId, partitionAssignmentStrategy, DefaultSessionTimeout, isCoordinatorForGroup = true) + val assignedConsumerId = joinGroupResult._2 + val joinGroupErrorCode = joinGroupResult._4 + assertEquals(Errors.NONE.code, joinGroupErrorCode) + + EasyMock.reset(offsetManager) + val heartbeatResult = heartbeat(groupId, assignedConsumerId, 2, isCoordinatorForGroup = true) + assertEquals(Errors.ILLEGAL_GENERATION.code, heartbeatResult) + } + + @Test + def testValidHeartbeat() { + val groupId = "groupId" + val consumerId = JoinGroupRequest.UNKNOWN_CONSUMER_ID + val partitionAssignmentStrategy = "range" + + val joinGroupResult = joinGroup(groupId, consumerId, partitionAssignmentStrategy, DefaultSessionTimeout, isCoordinatorForGroup = true) + val assignedConsumerId = joinGroupResult._2 + val joinGroupErrorCode = joinGroupResult._4 + assertEquals(Errors.NONE.code, joinGroupErrorCode) + + EasyMock.reset(offsetManager) + val heartbeatResult = heartbeat(groupId, assignedConsumerId, 1, isCoordinatorForGroup = true) + assertEquals(Errors.NONE.code, heartbeatResult) + } + + @Test + def testHeartbeatDuringRebalanceCausesIllegalGeneration() { + val groupId = "groupId" + val partitionAssignmentStrategy = "range" + + // First start up a group (with a slightly larger timeout to give us time to heartbeat when the rebalance starts) + val joinGroupResult = joinGroup(groupId, JoinGroupRequest.UNKNOWN_CONSUMER_ID, partitionAssignmentStrategy, + 100, isCoordinatorForGroup = true) + val assignedConsumerId = joinGroupResult._2 + val initialGenerationId = joinGroupResult._3 + val joinGroupErrorCode = joinGroupResult._4 + assertEquals(Errors.NONE.code, joinGroupErrorCode) + + // Then join 
with a new consumer to trigger a rebalance + EasyMock.reset(offsetManager) + sendJoinGroup(groupId, JoinGroupRequest.UNKNOWN_CONSUMER_ID, partitionAssignmentStrategy, + DefaultSessionTimeout, isCoordinatorForGroup = true) + + // We should be in the middle of a rebalance, so the heartbeat should return illegal generation + EasyMock.reset(offsetManager) + val heartbeatResult = heartbeat(groupId, assignedConsumerId, initialGenerationId, isCoordinatorForGroup = true) + assertEquals(Errors.ILLEGAL_GENERATION.code, heartbeatResult) + } + + @Test + def testGenerationIdIncrementsOnRebalance() { + val groupId = "groupId" + val consumerId = JoinGroupRequest.UNKNOWN_CONSUMER_ID + val otherConsumerId = JoinGroupRequest.UNKNOWN_CONSUMER_ID + val partitionAssignmentStrategy = "range" + + val joinGroupResult = joinGroup(groupId, consumerId, partitionAssignmentStrategy, DefaultSessionTimeout, isCoordinatorForGroup = true) + val initialGenerationId = joinGroupResult._3 + val joinGroupErrorCode = joinGroupResult._4 + assertEquals(1, initialGenerationId) + assertEquals(Errors.NONE.code, joinGroupErrorCode) + + EasyMock.reset(offsetManager) + val otherJoinGroupResult = joinGroup(groupId, otherConsumerId, partitionAssignmentStrategy, DefaultSessionTimeout, isCoordinatorForGroup = true) + val nextGenerationId = otherJoinGroupResult._3 + val otherJoinGroupErrorCode = otherJoinGroupResult._4 + assertEquals(2, nextGenerationId) + assertEquals(Errors.NONE.code, otherJoinGroupErrorCode) + } + + private def setupJoinGroupCallback: (Future[JoinGroupCallbackParams], JoinGroupCallback) = { + val responsePromise = Promise[JoinGroupCallbackParams] + val responseFuture = responsePromise.future + val responseCallback: JoinGroupCallback = (partitions, consumerId, generationId, errorCode) => + responsePromise.success((partitions, consumerId, generationId, errorCode)) + (responseFuture, responseCallback) + } + + private def setupHeartbeatCallback: (Future[HeartbeatCallbackParams], HeartbeatCallback) = { + val responsePromise = Promise[HeartbeatCallbackParams] + val responseFuture = responsePromise.future + val responseCallback: HeartbeatCallback = errorCode => responsePromise.success(errorCode) + (responseFuture, responseCallback) + } + + private def sendJoinGroup(groupId: String, + consumerId: String, + partitionAssignmentStrategy: String, + sessionTimeout: Int, + isCoordinatorForGroup: Boolean): Future[JoinGroupCallbackParams] = { + val (responseFuture, responseCallback) = setupJoinGroupCallback + EasyMock.expect(offsetManager.partitionFor(groupId)).andReturn(1) + EasyMock.expect(offsetManager.leaderIsLocal(1)).andReturn(isCoordinatorForGroup) + EasyMock.replay(offsetManager) + consumerCoordinator.handleJoinGroup(groupId, consumerId, Set.empty, sessionTimeout, partitionAssignmentStrategy, responseCallback) + responseFuture + } + + private def joinGroup(groupId: String, + consumerId: String, + partitionAssignmentStrategy: String, + sessionTimeout: Int, + isCoordinatorForGroup: Boolean): JoinGroupCallbackParams = { + val responseFuture = sendJoinGroup(groupId, consumerId, partitionAssignmentStrategy, sessionTimeout, isCoordinatorForGroup) + Await.result(responseFuture, Duration(40, TimeUnit.MILLISECONDS)) + } + + private def heartbeat(groupId: String, + consumerId: String, + generationId: Int, + isCoordinatorForGroup: Boolean): HeartbeatCallbackParams = { + val (responseFuture, responseCallback) = setupHeartbeatCallback + EasyMock.expect(offsetManager.partitionFor(groupId)).andReturn(1) + 
EasyMock.expect(offsetManager.leaderIsLocal(1)).andReturn(isCoordinatorForGroup) + EasyMock.replay(offsetManager) + consumerCoordinator.handleHeartbeat(groupId, consumerId, generationId, responseCallback) + Await.result(responseFuture, Duration(40, TimeUnit.MILLISECONDS)) + } +} diff --git a/core/src/test/scala/unit/kafka/coordinator/CoordinatorMetadataTest.scala b/core/src/test/scala/unit/kafka/coordinator/CoordinatorMetadataTest.scala index 08854c5..2cbf6e2 100644 --- a/core/src/test/scala/unit/kafka/coordinator/CoordinatorMetadataTest.scala +++ b/core/src/test/scala/unit/kafka/coordinator/CoordinatorMetadataTest.scala @@ -40,7 +40,7 @@ class CoordinatorMetadataTest extends JUnitSuite { def setUp() { val props = TestUtils.createBrokerConfig(nodeId = 0, zkConnect = "") zkClient = EasyMock.createStrictMock(classOf[ZkClient]) - coordinatorMetadata = new CoordinatorMetadata(KafkaConfig.fromProps(props), zkClient, null) + coordinatorMetadata = new CoordinatorMetadata(KafkaConfig.fromProps(props).brokerId, zkClient, null) } @Test diff --git a/core/src/test/scala/unit/kafka/integration/TopicMetadataTest.scala b/core/src/test/scala/unit/kafka/integration/TopicMetadataTest.scala index 995b059..a95ee5e 100644 --- a/core/src/test/scala/unit/kafka/integration/TopicMetadataTest.scala +++ b/core/src/test/scala/unit/kafka/integration/TopicMetadataTest.scala @@ -17,28 +17,32 @@ package kafka.integration -import org.apache.kafka.common.protocol.SecurityProtocol -import org.scalatest.junit.JUnit3Suite -import kafka.zk.ZooKeeperTestHarness -import kafka.admin.AdminUtils import java.nio.ByteBuffer + import junit.framework.Assert._ -import kafka.cluster.{BrokerEndPoint, Broker} +import kafka.admin.AdminUtils +import kafka.api.{TopicMetadataResponse, TopicMetadataRequest} +import kafka.client.ClientUtils +import kafka.cluster.{Broker, BrokerEndPoint} +import kafka.common.ErrorMapping +import kafka.server.{NotRunning, KafkaConfig, KafkaServer} import kafka.utils.TestUtils import kafka.utils.TestUtils._ -import kafka.server.{KafkaServer, KafkaConfig} -import kafka.api.TopicMetadataRequest -import kafka.common.ErrorMapping -import kafka.client.ClientUtils +import kafka.zk.ZooKeeperTestHarness +import org.apache.kafka.common.protocol.SecurityProtocol +import org.scalatest.junit.JUnit3Suite class TopicMetadataTest extends JUnit3Suite with ZooKeeperTestHarness { private var server1: KafkaServer = null var brokerEndPoints: Seq[BrokerEndPoint] = null + var adHocConfigs: Seq[KafkaConfig] = null + val numConfigs: Int = 2 override def setUp() { super.setUp() - val props = createBrokerConfigs(1, zkConnect) - val configs = props.map(KafkaConfig.fromProps) + val props = createBrokerConfigs(numConfigs, zkConnect) + val configs: Seq[KafkaConfig] = props.map(KafkaConfig.fromProps) + adHocConfigs = configs.takeRight(configs.size - 1) // Started and stopped by individual test cases server1 = TestUtils.createServer(configs.head) brokerEndPoints = Seq(new Broker(server1.config.brokerId, server1.config.hostName, server1.boundPort()).getBrokerEndPoint(SecurityProtocol.PLAINTEXT)) } @@ -130,4 +134,62 @@ class TopicMetadataTest extends JUnit3Suite with ZooKeeperTestHarness { assertEquals(1, partitionMetadata.head.replicas.size) assertTrue(partitionMetadata.head.leader.isDefined) } + + private def checkIsr(servers: Seq[KafkaServer]): Unit = { + val activeBrokers: Seq[KafkaServer] = servers.filter(x => x.brokerState.currentState != NotRunning.state) + val expectedIsr: Seq[BrokerEndPoint] = activeBrokers.map( + x => new 
BrokerEndPoint(x.config.brokerId, + if (x.config.hostName.nonEmpty) x.config.hostName else "localhost", + x.boundPort()) + ) + + // Assert that topic metadata at new brokers is updated correctly + activeBrokers.foreach(x => { + var metadata: TopicMetadataResponse = new TopicMetadataResponse(Seq(), Seq(), -1) + waitUntilTrue(() => { + metadata = ClientUtils.fetchTopicMetadata( + Set.empty, + Seq(new BrokerEndPoint( + x.config.brokerId, + if (x.config.hostName.nonEmpty) x.config.hostName else "localhost", + x.boundPort())), + "TopicMetadataTest-testBasicTopicMetadata", + 2000, 0) + metadata.topicsMetadata.nonEmpty && + metadata.topicsMetadata.head.partitionsMetadata.nonEmpty && + expectedIsr == metadata.topicsMetadata.head.partitionsMetadata.head.isr + }, + "Topic metadata is not correctly updated for broker " + x + ".\n" + + "Expected ISR: " + expectedIsr + "\n" + + "Actual ISR : " + (if (metadata.topicsMetadata.nonEmpty && + metadata.topicsMetadata.head.partitionsMetadata.nonEmpty) + metadata.topicsMetadata.head.partitionsMetadata.head.isr + else + "")) + }) + } + + + def testIsrAfterBrokerShutDownAndJoinsBack { + // start adHoc brokers + val adHocServers = adHocConfigs.map(p => createServer(p)) + val allServers: Seq[KafkaServer] = Seq(server1) ++ adHocServers + + // create topic + val topic: String = "test" + AdminUtils.createTopic(zkClient, topic, 1, numConfigs) + + // shutdown a broker + adHocServers.last.shutdown() + adHocServers.last.awaitShutdown() + + // startup a broker + adHocServers.last.startup() + + // check metadata is still correct and updated at all brokers + checkIsr(allServers) + + // shutdown adHoc brokers + adHocServers.map(p => p.shutdown()) + } } diff --git a/core/src/test/scala/unit/kafka/log4j/KafkaLog4jAppenderTest.scala b/core/src/test/scala/unit/kafka/log4j/KafkaLog4jAppenderTest.scala deleted file mode 100755 index 41366a1..0000000 --- a/core/src/test/scala/unit/kafka/log4j/KafkaLog4jAppenderTest.scala +++ /dev/null @@ -1,143 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package kafka.log4j - -import kafka.consumer.SimpleConsumer -import kafka.server.{KafkaConfig, KafkaServer} -import kafka.utils.{TestUtils, CoreUtils, Logging} -import kafka.api.FetchRequestBuilder -import kafka.producer.async.MissingConfigException -import kafka.serializer.Encoder -import kafka.zk.ZooKeeperTestHarness - -import java.util.Properties -import java.io.File - -import org.apache.log4j.spi.LoggingEvent -import org.apache.log4j.{PropertyConfigurator, Logger} -import org.junit.{After, Before, Test} -import org.scalatest.junit.JUnit3Suite - -import junit.framework.Assert._ - -class KafkaLog4jAppenderTest extends JUnit3Suite with ZooKeeperTestHarness with Logging { - - var logDirZk: File = null - var config: KafkaConfig = null - var server: KafkaServer = null - - var simpleConsumerZk: SimpleConsumer = null - - val tLogger = Logger.getLogger(getClass()) - - private val brokerZk = 0 - - @Before - override def setUp() { - super.setUp() - - val propsZk = TestUtils.createBrokerConfig(brokerZk, zkConnect) - val logDirZkPath = propsZk.getProperty("log.dir") - logDirZk = new File(logDirZkPath) - config = KafkaConfig.fromProps(propsZk) - server = TestUtils.createServer(config) - simpleConsumerZk = new SimpleConsumer("localhost", server.boundPort(), 1000000, 64 * 1024, "") - } - - @After - override def tearDown() { - simpleConsumerZk.close - server.shutdown - CoreUtils.rm(logDirZk) - super.tearDown() - } - - @Test - def testKafkaLog4jConfigs() { - // host missing - var props = new Properties() - props.put("log4j.rootLogger", "INFO") - props.put("log4j.appender.KAFKA", "kafka.producer.KafkaLog4jAppender") - props.put("log4j.appender.KAFKA.layout", "org.apache.log4j.PatternLayout") - props.put("log4j.appender.KAFKA.layout.ConversionPattern", "%-5p: %c - %m%n") - props.put("log4j.appender.KAFKA.Topic", "test-topic") - props.put("log4j.logger.kafka.log4j", "INFO, KAFKA") - - try { - PropertyConfigurator.configure(props) - fail("Missing properties exception was expected !") - } catch { - case e: MissingConfigException => - } - - // topic missing - props = new Properties() - props.put("log4j.rootLogger", "INFO") - props.put("log4j.appender.KAFKA", "kafka.producer.KafkaLog4jAppender") - props.put("log4j.appender.KAFKA.layout", "org.apache.log4j.PatternLayout") - props.put("log4j.appender.KAFKA.layout.ConversionPattern", "%-5p: %c - %m%n") - props.put("log4j.appender.KAFKA.brokerList", TestUtils.getBrokerListStrFromServers(Seq(server))) - props.put("log4j.logger.kafka.log4j", "INFO, KAFKA") - - try { - PropertyConfigurator.configure(props) - fail("Missing properties exception was expected !") - } catch { - case e: MissingConfigException => - } - } - - @Test - def testLog4jAppends() { - PropertyConfigurator.configure(getLog4jConfig) - - for(i <- 1 to 5) - info("test") - - val response = simpleConsumerZk.fetch(new FetchRequestBuilder().addFetch("test-topic", 0, 0L, 1024*1024).build()) - val fetchMessage = response.messageSet("test-topic", 0) - - var count = 0 - for(message <- fetchMessage) { - count = count + 1 - } - - assertEquals(5, count) - } - - private def getLog4jConfig: Properties = { - val props = new Properties() - props.put("log4j.rootLogger", "INFO") - props.put("log4j.appender.KAFKA", "kafka.producer.KafkaLog4jAppender") - props.put("log4j.appender.KAFKA.layout", "org.apache.log4j.PatternLayout") - props.put("log4j.appender.KAFKA.layout.ConversionPattern", "%-5p: %c - %m%n") - props.put("log4j.appender.KAFKA.BrokerList", TestUtils.getBrokerListStrFromServers(Seq(server))) - 
props.put("log4j.appender.KAFKA.Topic", "test-topic") - props.put("log4j.appender.KAFKA.RequiredNumAcks", "1") - props.put("log4j.appender.KAFKA.SyncSend", "true") - props.put("log4j.logger.kafka.log4j", "INFO, KAFKA") - props - } -} - -class AppenderStringEncoder(encoding: String = "UTF-8") extends Encoder[LoggingEvent] { - def toBytes(event: LoggingEvent): Array[Byte] = { - event.getMessage.toString.getBytes(encoding) - } -} - diff --git a/core/src/test/scala/unit/kafka/server/KafkaConfigTest.scala b/core/src/test/scala/unit/kafka/server/KafkaConfigTest.scala index 2428dbd..d354452 100755 --- a/core/src/test/scala/unit/kafka/server/KafkaConfigTest.scala +++ b/core/src/test/scala/unit/kafka/server/KafkaConfigTest.scala @@ -155,13 +155,12 @@ class KafkaConfigTest extends JUnit3Suite { @Test def testAdvertiseConfigured() { - val port = "9999" val advertisedHostName = "routable-host" val advertisedPort = "1234" val props = TestUtils.createBrokerConfig(0, TestUtils.MockZkConnect) props.put(KafkaConfig.AdvertisedHostNameProp, advertisedHostName) - props.put(KafkaConfig.AdvertisedPortProp, advertisedPort.toString) + props.put(KafkaConfig.AdvertisedPortProp, advertisedPort) val serverConfig = KafkaConfig.fromProps(props) val endpoints = serverConfig.advertisedListeners @@ -170,8 +169,41 @@ class KafkaConfigTest extends JUnit3Suite { assertEquals(endpoint.host, advertisedHostName) assertEquals(endpoint.port, advertisedPort.toInt) } + + @Test + def testAdvertisePortDefault() { + val advertisedHostName = "routable-host" + val port = "9999" + + val props = TestUtils.createBrokerConfig(0, TestUtils.MockZkConnect) + props.put(KafkaConfig.AdvertisedHostNameProp, advertisedHostName) + props.put(KafkaConfig.PortProp, port) + + val serverConfig = KafkaConfig.fromProps(props) + val endpoints = serverConfig.advertisedListeners + val endpoint = endpoints.get(SecurityProtocol.PLAINTEXT).get + + assertEquals(endpoint.host, advertisedHostName) + assertEquals(endpoint.port, port.toInt) + } + + @Test + def testAdvertiseHostNameDefault() { + val hostName = "routable-host" + val advertisedPort = "9999" + + val props = TestUtils.createBrokerConfig(0, TestUtils.MockZkConnect) + props.put(KafkaConfig.HostNameProp, hostName) + props.put(KafkaConfig.AdvertisedPortProp, advertisedPort) + val serverConfig = KafkaConfig.fromProps(props) + val endpoints = serverConfig.advertisedListeners + val endpoint = endpoints.get(SecurityProtocol.PLAINTEXT).get + assertEquals(endpoint.host, hostName) + assertEquals(endpoint.port, advertisedPort.toInt) + } + @Test def testDuplicateListeners() { val props = new Properties() diff --git a/core/src/test/scala/unit/kafka/server/OffsetCommitTest.scala b/core/src/test/scala/unit/kafka/server/OffsetCommitTest.scala index 528525b..39a6852 100755 --- a/core/src/test/scala/unit/kafka/server/OffsetCommitTest.scala +++ b/core/src/test/scala/unit/kafka/server/OffsetCommitTest.scala @@ -120,7 +120,7 @@ class OffsetCommitTest extends JUnit3Suite with ZooKeeperTestHarness { val fetchRequest2 = OffsetFetchRequest(group, Seq(unknownTopicAndPartition)) val fetchResponse2 = simpleConsumer.fetchOffsets(fetchRequest2) - assertEquals(OffsetMetadataAndError.UnknownTopicOrPartition, fetchResponse2.requestInfo.get(unknownTopicAndPartition).get) + assertEquals(OffsetMetadataAndError.NoOffset, fetchResponse2.requestInfo.get(unknownTopicAndPartition).get) assertEquals(1, fetchResponse2.requestInfo.size) } @@ -166,14 +166,14 @@ class OffsetCommitTest extends JUnit3Suite with ZooKeeperTestHarness { 
assertEquals(ErrorMapping.NoError, fetchResponse.requestInfo.get(TopicAndPartition(topic2, 1)).get.error) assertEquals(ErrorMapping.NoError, fetchResponse.requestInfo.get(TopicAndPartition(topic3, 0)).get.error) - assertEquals(ErrorMapping.UnknownTopicOrPartitionCode, fetchResponse.requestInfo.get(TopicAndPartition(topic3, 1)).get.error) - assertEquals(OffsetMetadataAndError.UnknownTopicOrPartition, fetchResponse.requestInfo.get(TopicAndPartition(topic3, 1)).get) + assertEquals(ErrorMapping.NoError, fetchResponse.requestInfo.get(TopicAndPartition(topic3, 1)).get.error) + assertEquals(OffsetMetadataAndError.NoOffset, fetchResponse.requestInfo.get(TopicAndPartition(topic3, 1)).get) assertEquals(ErrorMapping.NoError, fetchResponse.requestInfo.get(TopicAndPartition(topic4, 0)).get.error) assertEquals(OffsetMetadataAndError.NoOffset, fetchResponse.requestInfo.get(TopicAndPartition(topic4, 0)).get) - assertEquals(ErrorMapping.UnknownTopicOrPartitionCode, fetchResponse.requestInfo.get(TopicAndPartition(topic5, 0)).get.error) - assertEquals(OffsetMetadataAndError.UnknownTopicOrPartition, fetchResponse.requestInfo.get(TopicAndPartition(topic5, 0)).get) + assertEquals(ErrorMapping.NoError, fetchResponse.requestInfo.get(TopicAndPartition(topic5, 0)).get.error) + assertEquals(OffsetMetadataAndError.NoOffset, fetchResponse.requestInfo.get(TopicAndPartition(topic5, 0)).get) assertEquals("metadata one", fetchResponse.requestInfo.get(TopicAndPartition(topic1, 0)).get.metadata) assertEquals("metadata two", fetchResponse.requestInfo.get(TopicAndPartition(topic2, 0)).get.metadata) diff --git a/core/src/test/scala/unit/kafka/utils/ReplicationUtilsTest.scala b/core/src/test/scala/unit/kafka/utils/ReplicationUtilsTest.scala index c96c0ff..b9de8d6 100644 --- a/core/src/test/scala/unit/kafka/utils/ReplicationUtilsTest.scala +++ b/core/src/test/scala/unit/kafka/utils/ReplicationUtilsTest.scala @@ -70,6 +70,8 @@ class ReplicationUtilsTest extends JUnit3Suite with ZooKeeperTestHarness { EasyMock.expect(replicaManager.zkClient).andReturn(zkClient) EasyMock.replay(replicaManager) + ZkUtils.makeSurePersistentPathExists(zkClient, ZkUtils.IsrChangeNotificationPath) + val replicas = List(0,1) // regular update diff --git a/gradle/buildscript.gradle b/gradle/buildscript.gradle index 5e45c06..047632b 100644 --- a/gradle/buildscript.gradle +++ b/gradle/buildscript.gradle @@ -1,3 +1,18 @@ +// Licensed to the Apache Software Foundation (ASF) under one or more +// contributor license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright ownership. +// The ASF licenses this file to You under the Apache License, Version 2.0 +// (the "License"); you may not use this file except in compliance with +// the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + repositories { repositories { // For license plugin. 
@@ -6,7 +21,3 @@ repositories { } } } - -dependencies { - classpath 'nl.javadude.gradle.plugins:license-gradle-plugin:0.10.0' -} diff --git a/gradle/license.gradle b/gradle/license.gradle deleted file mode 100644 index b4b62eb..0000000 --- a/gradle/license.gradle +++ /dev/null @@ -1,9 +0,0 @@ -subprojects { - apply plugin: 'license' - - license { - header rootProject.file('HEADER') - // Skip Twitter bootstrap JS and CSS. - skipExistingHeaders = true - } -} diff --git a/gradle/rat.gradle b/gradle/rat.gradle new file mode 100644 index 0000000..d62b372 --- /dev/null +++ b/gradle/rat.gradle @@ -0,0 +1,115 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +import org.gradle.api.Plugin +import org.gradle.api.Project +import org.gradle.api.Task +import org.gradle.api.internal.project.IsolatedAntBuilder + +apply plugin: RatPlugin + +class RatTask extends DefaultTask { + @Input + List excludes + + def reportPath = 'build/rat' + def stylesheet = 'gradle/resources/rat-output-to-html.xsl' + def xmlReport = reportPath + '/rat-report.xml' + def htmlReport = reportPath + '/rat-report.html' + + def generateXmlReport(File reportDir) { + def antBuilder = services.get(IsolatedAntBuilder) + def ratClasspath = project.configurations.rat + antBuilder.withClasspath(ratClasspath).execute { + ant.taskdef(resource: 'org/apache/rat/anttasks/antlib.xml') + ant.report(format: 'xml', reportFile: xmlReport) { + fileset(dir: ".") { + patternset { + excludes.each { + exclude(name: it) + } + } + } + } + } + } + + def printUnknownFiles() { + def ratXml = new XmlParser().parse(xmlReport) + def unknownLicenses = 0 + ratXml.resource.each { resource -> + if (resource.'license-approval'.@name[0] == "false") { + println('Unknown license: ' + resource.@name) + unknownLicenses++ + } + } + if (unknownLicenses > 0) { + throw new GradleException("Found " + unknownLicenses + " files with " + + "unknown licenses.") + } + } + + def generateHtmlReport() { + def antBuilder = services.get(IsolatedAntBuilder) + def ratClasspath = project.configurations.rat + antBuilder.withClasspath(ratClasspath).execute { + ant.xslt( + in: xmlReport, + style: stylesheet, + out: htmlReport, + classpath: ratClasspath) + } + println('Rat report: ' + htmlReport) + } + + @TaskAction + def rat() { + File reportDir = new File(reportPath) + if (!reportDir.exists()) { + reportDir.mkdirs() + } + generateXmlReport(reportDir) + printUnknownFiles() + generateHtmlReport() + } +} + +class RatPlugin implements Plugin { + void apply(Project project) { + configureDependencies(project) + project.plugins.apply(JavaPlugin); + Task ratTask = project.task("rat", + type: RatTask, + group: 'Build', + description: 'Runs Apache Rat checks.') + project.tasks[JavaPlugin.TEST_TASK_NAME].dependsOn ratTask + } + + void 
configureDependencies(final Project project) { + project.configurations { + rat + } + project.repositories { + mavenCentral() + } + project.dependencies { + rat 'org.apache.rat:apache-rat-tasks:0.11' + } + } +} diff --git a/gradle/resources/rat-output-to-html.xsl b/gradle/resources/rat-output-to-html.xsl new file mode 100644 index 0000000..97ea7a1 --- /dev/null +++ b/gradle/resources/rat-output-to-html.xsl @@ -0,0 +1,206 @@ + + + + + + + + + + + + + + + + + + + + + + +

[gradle/resources/rat-output-to-html.xsl -- the XSLT markup of this added stylesheet did not survive extraction; only its rendered text remains. Recoverable content: a "Rat Report" page, generated from the Rat XML report with Saxon9B, containing a snapshot summary table (notes, binaries, archives, standards, Apache-licensed and generated documents, unknown licenses), lists of unapproved licenses and archives, a legend (AL = Apache License header present, B = binary, A = compressed archive, N = notices/licenses), the first few lines of each file lacking an AL header, and per-resource details (header type, license family, license approval, type).]
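For reference, a minimal sketch (not part of this patch) of how a root build.gradle could wire in the new Rat task; the `apply from` path matches the file added above, but the exclude patterns and the idea of configuring them from the root script are assumptions for illustration:

    // Hypothetical root build.gradle fragment (Groovy DSL) -- illustrative only.
    apply from: file('gradle/rat.gradle')   // registers the `rat` task and makes `test` depend on it

    rat {
        // RatTask declares `@Input List excludes`; paths matching these
        // Ant-style patterns are skipped by the license audit.
        excludes = [
            '**/build/**',
            '.git/**',
            '.gradle/**',
            'gradle/wrapper/**'
        ]
    }

Running `gradle rat` would then write build/rat/rat-report.xml and build/rat/rat-report.html under the reportPath defined in RatTask, and fail the build if any file carries an unknown license.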
      diff --git a/kafka-merge-pr.py b/kafka-merge-pr.py new file mode 100644 index 0000000..4cc82e0 --- /dev/null +++ b/kafka-merge-pr.py @@ -0,0 +1,442 @@ +#!/usr/bin/env python + +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +# Utility for creating well-formed pull request merges and pushing them to Apache. This script is a modified version +# of the one created by the Spark project (https://github.com/apache/spark/blob/master/dev/merge_spark_pr.py). +# +# Usage: ./kafka-merge-pr.py (see config env vars below) +# +# This utility assumes you already have local a kafka git folder and that you +# have added remotes corresponding to both: +# (i) the github apache kafka mirror and +# (ii) the apache kafka git repo. + +import json +import os +import re +import subprocess +import sys +import urllib2 + +try: + import jira.client + JIRA_IMPORTED = True +except ImportError: + JIRA_IMPORTED = False + +PROJECT_NAME = "kafka" + +CAPITALIZED_PROJECT_NAME = "kafka".upper() + +# Location of the local git repository +REPO_HOME = os.environ.get("%s_HOME" % CAPITALIZED_PROJECT_NAME, os.getcwd()) +# Remote name which points to the GitHub site +PR_REMOTE_NAME = os.environ.get("PR_REMOTE_NAME", "apache-github") +# Remote name which points to Apache git +PUSH_REMOTE_NAME = os.environ.get("PUSH_REMOTE_NAME", "apache") +# ASF JIRA username +JIRA_USERNAME = os.environ.get("JIRA_USERNAME", "") +# ASF JIRA password +JIRA_PASSWORD = os.environ.get("JIRA_PASSWORD", "") + +GITHUB_USER = os.environ.get("GITHUB_USER", "apache") +GITHUB_BASE = "https://github.com/%s/%s/pull" % (GITHUB_USER, PROJECT_NAME) +GITHUB_API_BASE = "https://api.github.com/repos/%s/%s" % (GITHUB_USER, PROJECT_NAME) +JIRA_BASE = "https://issues.apache.org/jira/browse" +JIRA_API_BASE = "https://issues.apache.org/jira" +# Prefix added to temporary branches +TEMP_BRANCH_PREFIX = "PR_TOOL" +# TODO Introduce a convention as this is too brittle +RELEASE_BRANCH_PREFIX = "0." 
+ +DEV_BRANCH_NAME="trunk" + +def get_json(url): + try: + return json.load(urllib2.urlopen(url)) + except urllib2.HTTPError as e: + print "Unable to fetch URL, exiting: %s" % url + sys.exit(-1) + + +def fail(msg): + print msg + clean_up() + sys.exit(-1) + + +def run_cmd(cmd): + print cmd + if isinstance(cmd, list): + return subprocess.check_output(cmd) + else: + return subprocess.check_output(cmd.split(" ")) + + +def continue_maybe(prompt): + result = raw_input("\n%s (y/n): " % prompt) + if result.lower() != "y": + fail("Okay, exiting") + +def clean_up(): + print "Restoring head pointer to %s" % original_head + run_cmd("git checkout %s" % original_head) + + branches = run_cmd("git branch").replace(" ", "").split("\n") + + for branch in filter(lambda x: x.startswith(TEMP_BRANCH_PREFIX), branches): + print "Deleting local branch %s" % branch + run_cmd("git branch -D %s" % branch) + + +# merge the requested PR and return the merge hash +def merge_pr(pr_num, target_ref, title, body, pr_repo_desc): + pr_branch_name = "%s_MERGE_PR_%s" % (TEMP_BRANCH_PREFIX, pr_num) + target_branch_name = "%s_MERGE_PR_%s_%s" % (TEMP_BRANCH_PREFIX, pr_num, target_ref.upper()) + run_cmd("git fetch %s pull/%s/head:%s" % (PR_REMOTE_NAME, pr_num, pr_branch_name)) + run_cmd("git fetch %s %s:%s" % (PUSH_REMOTE_NAME, target_ref, target_branch_name)) + run_cmd("git checkout %s" % target_branch_name) + + had_conflicts = False + try: + run_cmd(['git', 'merge', pr_branch_name, '--squash']) + except Exception as e: + msg = "Error merging: %s\nWould you like to manually fix-up this merge?" % e + continue_maybe(msg) + msg = "Okay, please fix any conflicts and 'git add' conflicting files... Finished?" + continue_maybe(msg) + had_conflicts = True + + commit_authors = run_cmd(['git', 'log', 'HEAD..%s' % pr_branch_name, + '--pretty=format:%an <%ae>']).split("\n") + distinct_authors = sorted(set(commit_authors), + key=lambda x: commit_authors.count(x), reverse=True) + primary_author = distinct_authors[0] + commits = run_cmd(['git', 'log', 'HEAD..%s' % pr_branch_name, + '--pretty=format:%h [%an] %s']).split("\n\n") + + merge_message_flags = [] + + merge_message_flags += ["-m", title] + if body is not None: + # We remove @ symbols from the body to avoid triggering e-mails + # to people every time someone creates a public fork of the project. + merge_message_flags += ["-m", body.replace("@", "")] + + authors = "\n".join(["Author: %s" % a for a in distinct_authors]) + + merge_message_flags += ["-m", authors] + + if had_conflicts: + committer_name = run_cmd("git config --get user.name").strip() + committer_email = run_cmd("git config --get user.email").strip() + message = "This patch had conflicts when merged, resolved by\nCommitter: %s <%s>" % ( + committer_name, committer_email) + merge_message_flags += ["-m", message] + + # The string "Closes #%s" string is required for GitHub to correctly close the PR + merge_message_flags += [ + "-m", + "Closes #%s from %s and squashes the following commits:" % (pr_num, pr_repo_desc)] + for c in commits: + merge_message_flags += ["-m", c] + + run_cmd(['git', 'commit', '--author="%s"' % primary_author] + merge_message_flags) + + continue_maybe("Merge complete (local ref %s). Push to %s?" 
% ( + target_branch_name, PUSH_REMOTE_NAME)) + + try: + run_cmd('git push %s %s:%s' % (PUSH_REMOTE_NAME, target_branch_name, target_ref)) + except Exception as e: + clean_up() + fail("Exception while pushing: %s" % e) + + merge_hash = run_cmd("git rev-parse %s" % target_branch_name)[:8] + clean_up() + print("Pull request #%s merged!" % pr_num) + print("Merge hash: %s" % merge_hash) + return merge_hash + + +def cherry_pick(pr_num, merge_hash, default_branch): + pick_ref = raw_input("Enter a branch name [%s]: " % default_branch) + if pick_ref == "": + pick_ref = default_branch + + pick_branch_name = "%s_PICK_PR_%s_%s" % (TEMP_BRANCH_PREFIX, pr_num, pick_ref.upper()) + + run_cmd("git fetch %s %s:%s" % (PUSH_REMOTE_NAME, pick_ref, pick_branch_name)) + run_cmd("git checkout %s" % pick_branch_name) + + try: + run_cmd("git cherry-pick -sx %s" % merge_hash) + except Exception as e: + msg = "Error cherry-picking: %s\nWould you like to manually fix-up this merge?" % e + continue_maybe(msg) + msg = "Okay, please fix any conflicts and finish the cherry-pick. Finished?" + continue_maybe(msg) + + continue_maybe("Pick complete (local ref %s). Push to %s?" % ( + pick_branch_name, PUSH_REMOTE_NAME)) + + try: + run_cmd('git push %s %s:%s' % (PUSH_REMOTE_NAME, pick_branch_name, pick_ref)) + except Exception as e: + clean_up() + fail("Exception while pushing: %s" % e) + + pick_hash = run_cmd("git rev-parse %s" % pick_branch_name)[:8] + clean_up() + + print("Pull request #%s picked into %s!" % (pr_num, pick_ref)) + print("Pick hash: %s" % pick_hash) + return pick_ref + + +def fix_version_from_branch(branch, versions): + # Note: Assumes this is a sorted (newest->oldest) list of un-released versions + if branch == DEV_BRANCH_NAME: + return versions[0] + else: + return filter(lambda x: x.name.startswith(branch), versions)[-1] + + +def resolve_jira_issue(merge_branches, comment, default_jira_id=""): + asf_jira = jira.client.JIRA({'server': JIRA_API_BASE}, + basic_auth=(JIRA_USERNAME, JIRA_PASSWORD)) + + jira_id = raw_input("Enter a JIRA id [%s]: " % default_jira_id) + if jira_id == "": + jira_id = default_jira_id + + try: + issue = asf_jira.issue(jira_id) + except Exception as e: + fail("ASF JIRA could not find %s\n%s" % (jira_id, e)) + + cur_status = issue.fields.status.name + cur_summary = issue.fields.summary + cur_assignee = issue.fields.assignee + if cur_assignee is None: + cur_assignee = "NOT ASSIGNED!!!" + else: + cur_assignee = cur_assignee.displayName + + if cur_status == "Resolved" or cur_status == "Closed": + fail("JIRA issue %s already has status '%s'" % (jira_id, cur_status)) + print ("=== JIRA %s ===" % jira_id) + print ("summary\t\t%s\nassignee\t%s\nstatus\t\t%s\nurl\t\t%s/%s\n" % ( + cur_summary, cur_assignee, cur_status, JIRA_BASE, jira_id)) + + versions = asf_jira.project_versions(CAPITALIZED_PROJECT_NAME) + versions = sorted(versions, key=lambda x: x.name, reverse=True) + versions = filter(lambda x: x.raw['released'] is False, versions) + # Consider only x.y.z versions + versions = filter(lambda x: re.match('\d+\.\d+\.\d+', x.name), versions) + + default_fix_versions = map(lambda x: fix_version_from_branch(x, versions).name, merge_branches) + for v in default_fix_versions: + # Handles the case where we have forked a release branch but not yet made the release. + # In this case, if the PR is committed to the master branch and the release branch, we + # only consider the release branch to be the fix version. E.g. it is not valid to have + # both 1.1.0 and 1.0.0 as fix versions. 
+ (major, minor, patch) = v.split(".") + if patch == "0": + previous = "%s.%s.%s" % (major, int(minor) - 1, 0) + if previous in default_fix_versions: + default_fix_versions = filter(lambda x: x != v, default_fix_versions) + default_fix_versions = ",".join(default_fix_versions) + + fix_versions = raw_input("Enter comma-separated fix version(s) [%s]: " % default_fix_versions) + if fix_versions == "": + fix_versions = default_fix_versions + fix_versions = fix_versions.replace(" ", "").split(",") + + def get_version_json(version_str): + return filter(lambda v: v.name == version_str, versions)[0].raw + + jira_fix_versions = map(lambda v: get_version_json(v), fix_versions) + + resolve = filter(lambda a: a['name'] == "Resolve Issue", asf_jira.transitions(jira_id))[0] + asf_jira.transition_issue(jira_id, resolve["id"], fixVersions=jira_fix_versions, comment=comment) + + print "Successfully resolved %s with fixVersions=%s!" % (jira_id, fix_versions) + + +def resolve_jira_issues(title, merge_branches, comment): + jira_ids = re.findall("%s-[0-9]{4,5}" % CAPITALIZED_PROJECT_NAME, title) + + if len(jira_ids) == 0: + resolve_jira_issue(merge_branches, comment) + for jira_id in jira_ids: + resolve_jira_issue(merge_branches, comment, jira_id) + + +def standardize_jira_ref(text): + """ + Standardize the jira reference commit message prefix to "PROJECT_NAME-XXX; Issue" + + >>> standardize_jira_ref("%s-5954; Top by key" % CAPITALIZED_PROJECT_NAME) + 'KAFKA-5954; Top by key' + >>> standardize_jira_ref("%s-5821; ParquetRelation2 CTAS should check if delete is successful" % PROJECT_NAME) + 'KAFKA-5821; ParquetRelation2 CTAS should check if delete is successful' + >>> standardize_jira_ref("%s-4123 [WIP] Show new dependencies added in pull requests" % PROJECT_NAME) + 'KAFKA-4123; [WIP] Show new dependencies added in pull requests' + >>> standardize_jira_ref("%s 5954: Top by key" % PROJECT_NAME) + 'KAFKA-5954; Top by key' + >>> standardize_jira_ref("%s-979 a LRU scheduler for load balancing in TaskSchedulerImpl" % PROJECT_NAME) + 'KAFKA-979; a LRU scheduler for load balancing in TaskSchedulerImpl' + >>> standardize_jira_ref("%s-1094 Support MiMa for reporting binary compatibility across versions." % CAPITALIZED_PROJECT_NAME) + 'KAFKA-1094; Support MiMa for reporting binary compatibility across versions.' + >>> standardize_jira_ref("[WIP] %s-1146; Vagrant support" % CAPITALIZED_PROJECT_NAME) + 'KAFKA-1146; [WIP] Vagrant support' + >>> standardize_jira_ref("%s-1032. If Yarn app fails before registering, app master stays aroun..." % PROJECT_NAME) + 'KAFKA-1032; If Yarn app fails before registering, app master stays aroun...' + >>> standardize_jira_ref("%s-6250 %s-6146 %s-5911: Types are now reserved words in DDL parser." % (PROJECT_NAME, PROJECT_NAME, CAPITALIZED_PROJECT_NAME)) + 'KAFKA-6250 KAFKA-6146 KAFKA-5911; Types are now reserved words in DDL parser.' 
+ >>> standardize_jira_ref("Additional information for users building from source code") + 'Additional information for users building from source code' + """ + jira_refs = [] + components = [] + + # Extract JIRA ref(s): + pattern = re.compile(r'(%s[-\s]*[0-9]{3,6})+' % CAPITALIZED_PROJECT_NAME, re.IGNORECASE) + for ref in pattern.findall(text): + # Add brackets, replace spaces with a dash, & convert to uppercase + jira_refs.append(re.sub(r'\s+', '-', ref.upper())) + text = text.replace(ref, '') + + # Extract project name component(s): + # Look for alphanumeric chars, spaces, dashes, periods, and/or commas + pattern = re.compile(r'(\[[\w\s,-\.]+\])', re.IGNORECASE) + for component in pattern.findall(text): + components.append(component.upper()) + text = text.replace(component, '') + + # Cleanup any remaining symbols: + pattern = re.compile(r'^\W+(.*)', re.IGNORECASE) + if (pattern.search(text) is not None): + text = pattern.search(text).groups()[0] + + # Assemble full text (JIRA ref(s), module(s), remaining text) + jira_prefix = ' '.join(jira_refs).strip() + if jira_prefix: + jira_prefix = jira_prefix + "; " + clean_text = jira_prefix + ' '.join(components).strip() + " " + text.strip() + + # Replace multiple spaces with a single space, e.g. if no jira refs and/or components were included + clean_text = re.sub(r'\s+', ' ', clean_text.strip()) + + return clean_text + +def main(): + global original_head + + original_head = run_cmd("git rev-parse HEAD")[:8] + + branches = get_json("%s/branches" % GITHUB_API_BASE) + branch_names = filter(lambda x: x.startswith(RELEASE_BRANCH_PREFIX), [x['name'] for x in branches]) + # Assumes branch names can be sorted lexicographically + latest_branch = sorted(branch_names, reverse=True)[0] + + pr_num = raw_input("Which pull request would you like to merge? (e.g. 34): ") + pr = get_json("%s/pulls/%s" % (GITHUB_API_BASE, pr_num)) + pr_events = get_json("%s/issues/%s/events" % (GITHUB_API_BASE, pr_num)) + + url = pr["url"] + + # Decide whether to use the modified title or not + modified_title = standardize_jira_ref(pr["title"]) + if modified_title != pr["title"]: + print "I've re-written the title as follows to match the standard format:" + print "Original: %s" % pr["title"] + print "Modified: %s" % modified_title + result = raw_input("Would you like to use the modified title? (y/n): ") + if result.lower() == "y": + title = modified_title + print "Using modified title:" + else: + title = pr["title"] + print "Using original title:" + print title + else: + title = pr["title"] + + body = pr["body"] + target_ref = pr["base"]["ref"] + user_login = pr["user"]["login"] + base_ref = pr["head"]["ref"] + pr_repo_desc = "%s/%s" % (user_login, base_ref) + + # Merged pull requests don't appear as merged in the GitHub API; + # Instead, they're closed by asfgit. + merge_commits = \ + [e for e in pr_events if e["actor"]["login"] == "asfgit" and e["event"] == "closed"] + + if merge_commits: + merge_hash = merge_commits[0]["commit_id"] + message = get_json("%s/commits/%s" % (GITHUB_API_BASE, merge_hash))["commit"]["message"] + + print "Pull request %s has already been merged, assuming you want to backport" % pr_num + commit_is_downloaded = run_cmd(['git', 'rev-parse', '--quiet', '--verify', + "%s^{commit}" % merge_hash]).strip() != "" + if not commit_is_downloaded: + fail("Couldn't find any merge commit for #%s, you may need to update HEAD." 
% pr_num) + + print "Found commit %s:\n%s" % (merge_hash, message) + cherry_pick(pr_num, merge_hash, latest_branch) + sys.exit(0) + + if not bool(pr["mergeable"]): + msg = "Pull request %s is not mergeable in its current form.\n" % pr_num + \ + "Continue? (experts only!)" + continue_maybe(msg) + + print ("\n=== Pull Request #%s ===" % pr_num) + print ("title\t%s\nsource\t%s\ntarget\t%s\nurl\t%s" % ( + title, pr_repo_desc, target_ref, url)) + continue_maybe("Proceed with merging pull request #%s?" % pr_num) + + merged_refs = [target_ref] + + merge_hash = merge_pr(pr_num, target_ref, title, body, pr_repo_desc) + + pick_prompt = "Would you like to pick %s into another branch?" % merge_hash + while raw_input("\n%s (y/n): " % pick_prompt).lower() == "y": + merged_refs = merged_refs + [cherry_pick(pr_num, merge_hash, latest_branch)] + + if JIRA_IMPORTED: + if JIRA_USERNAME and JIRA_PASSWORD: + continue_maybe("Would you like to update an associated JIRA?") + jira_comment = "Issue resolved by pull request %s\n[%s/%s]" % (pr_num, GITHUB_BASE, pr_num) + resolve_jira_issues(title, merged_refs, jira_comment) + else: + print "JIRA_USERNAME and JIRA_PASSWORD not set" + print "Exiting without trying to close the associated JIRA." + else: + print "Could not find jira-python library. Run 'sudo pip install jira-python' to install." + print "Exiting without trying to close the associated JIRA." + +if __name__ == "__main__": + import doctest + doctest.testmod() + + main() diff --git a/kafka-patch-review.py b/kafka-patch-review.py index b5a2e95..94873c3 100644 --- a/kafka-patch-review.py +++ b/kafka-patch-review.py @@ -1,4 +1,21 @@ #!/usr/bin/env python +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. import argparse import sys diff --git a/log4j-appender/src/main/java/org/apache/kafka/log4jappender/KafkaLog4jAppender.java b/log4j-appender/src/main/java/org/apache/kafka/log4jappender/KafkaLog4jAppender.java new file mode 100644 index 0000000..628ff53 --- /dev/null +++ b/log4j-appender/src/main/java/org/apache/kafka/log4jappender/KafkaLog4jAppender.java @@ -0,0 +1,167 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.kafka.log4jappender; + +import org.apache.kafka.clients.producer.KafkaProducer; +import org.apache.kafka.clients.producer.Producer; +import org.apache.kafka.clients.producer.ProducerRecord; +import org.apache.kafka.clients.producer.RecordMetadata; +import org.apache.kafka.common.config.ConfigException; +import org.apache.log4j.AppenderSkeleton; +import org.apache.log4j.helpers.LogLog; +import org.apache.log4j.spi.LoggingEvent; + +import java.util.Date; +import java.util.Properties; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.Future; + +/** + * A log4j appender that produces log messages to Kafka + */ +public class KafkaLog4jAppender extends AppenderSkeleton { + + private static final String BOOTSTRAP_SERVERS_CONFIG = "bootstrap.servers"; + private static final String COMPRESSION_TYPE_CONFIG = "compression.type"; + private static final String ACKS_CONFIG = "acks"; + private static final String RETRIES_CONFIG = "retries"; + private static final String KEY_SERIALIZER_CLASS_CONFIG = "key.serializer"; + private static final String VALUE_SERIALIZER_CLASS_CONFIG = "value.serializer"; + + private String brokerList = null; + private String topic = null; + private String compressionType = null; + + private int retries = 0; + private int requiredNumAcks = Integer.MAX_VALUE; + private boolean syncSend = false; + private Producer producer = null; + + public Producer getProducer() { + return producer; + } + + public String getBrokerList() { + return brokerList; + } + + public void setBrokerList(String brokerList) { + this.brokerList = brokerList; + } + + public int getRequiredNumAcks() { + return requiredNumAcks; + } + + public void setRequiredNumAcks(int requiredNumAcks) { + this.requiredNumAcks = requiredNumAcks; + } + + public int getRetries() { + return retries; + } + + public void setRetries(int retries) { + this.retries = retries; + } + + public String getCompressionType() { + return compressionType; + } + + public void setCompressionType(String compressionType) { + this.compressionType = compressionType; + } + + public String getTopic() { + return topic; + } + + public void setTopic(String topic) { + this.topic = topic; + } + + public boolean getSyncSend() { + return syncSend; + } + + public void setSyncSend(boolean syncSend) { + this.syncSend = syncSend; + } + + @Override + public void activateOptions() { + // check for config parameter validity + Properties props = new Properties(); + if (brokerList != null) + props.put(BOOTSTRAP_SERVERS_CONFIG, brokerList); + if (props.isEmpty()) + throw new ConfigException("The bootstrap servers property should be specified"); + if (topic == null) + throw new ConfigException("Topic must be specified by the Kafka log4j appender"); + if (compressionType != null) + props.put(COMPRESSION_TYPE_CONFIG, compressionType); + if (requiredNumAcks != Integer.MAX_VALUE) + props.put(ACKS_CONFIG, requiredNumAcks); + if (retries > 0) + props.put(RETRIES_CONFIG, retries); + + props.put(KEY_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.ByteArraySerializer"); + props.put(VALUE_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.ByteArraySerializer"); + this.producer = getKafkaProducer(props); + LogLog.debug("Kafka producer connected to " + brokerList); + LogLog.debug("Logging for topic: " + topic); + } + + protected Producer getKafkaProducer(Properties props) { + return new 
KafkaProducer(props); + } + + @Override + protected void append(LoggingEvent event) { + String message = subAppend(event); + LogLog.debug("[" + new Date(event.getTimeStamp()) + "]" + message); + Future response = producer.send(new ProducerRecord(topic, message.getBytes())); + if (syncSend) { + try { + response.get(); + } catch (InterruptedException ex) { + throw new RuntimeException(ex); + } catch (ExecutionException ex) { + throw new RuntimeException(ex); + } + } + } + + private String subAppend(LoggingEvent event) { + return (this.layout == null) ? event.getRenderedMessage() : this.layout.format(event); + } + + @Override + public void close() { + if (!this.closed) { + this.closed = true; + producer.close(); + } + } + + @Override + public boolean requiresLayout() { + return true; + } +} diff --git a/log4j-appender/src/test/java/org/apache/kafka/log4jappender/KafkaLog4jAppenderTest.java b/log4j-appender/src/test/java/org/apache/kafka/log4jappender/KafkaLog4jAppenderTest.java new file mode 100644 index 0000000..71bdd94 --- /dev/null +++ b/log4j-appender/src/test/java/org/apache/kafka/log4jappender/KafkaLog4jAppenderTest.java @@ -0,0 +1,98 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.kafka.log4jappender; + +import org.apache.kafka.common.config.ConfigException; +import org.apache.log4j.Logger; +import org.apache.log4j.PropertyConfigurator; +import org.junit.Assert; +import org.junit.Test; + +import java.io.UnsupportedEncodingException; +import java.util.Properties; + +public class KafkaLog4jAppenderTest { + + Logger logger = Logger.getLogger(KafkaLog4jAppenderTest.class); + + @Test + public void testKafkaLog4jConfigs() { + // host missing + Properties props = new Properties(); + props.put("log4j.rootLogger", "INFO"); + props.put("log4j.appender.KAFKA", "org.apache.kafka.log4jappender.KafkaLog4jAppender"); + props.put("log4j.appender.KAFKA.layout", "org.apache.log4j.PatternLayout"); + props.put("log4j.appender.KAFKA.layout.ConversionPattern", "%-5p: %c - %m%n"); + props.put("log4j.appender.KAFKA.Topic", "test-topic"); + props.put("log4j.logger.kafka.log4j", "INFO, KAFKA"); + + try { + PropertyConfigurator.configure(props); + Assert.fail("Missing properties exception was expected !"); + } catch (ConfigException ex) { + // It's OK! 
+ } + + // topic missing + props = new Properties(); + props.put("log4j.rootLogger", "INFO"); + props.put("log4j.appender.KAFKA", "org.apache.kafka.log4jappender.KafkaLog4jAppender"); + props.put("log4j.appender.KAFKA.layout", "org.apache.log4j.PatternLayout"); + props.put("log4j.appender.KAFKA.layout.ConversionPattern", "%-5p: %c - %m%n"); + props.put("log4j.appender.KAFKA.brokerList", "127.0.0.1:9093"); + props.put("log4j.logger.kafka.log4j", "INFO, KAFKA"); + + try { + PropertyConfigurator.configure(props); + Assert.fail("Missing properties exception was expected !"); + } catch (ConfigException ex) { + // It's OK! + } + } + + + @Test + public void testLog4jAppends() throws UnsupportedEncodingException { + PropertyConfigurator.configure(getLog4jConfig()); + + for (int i = 1; i <= 5; ++i) { + logger.error(getMessage(i)); + } + + Assert.assertEquals( + 5, ((MockKafkaLog4jAppender) (logger.getRootLogger().getAppender("KAFKA"))).getHistory().size()); + } + + private byte[] getMessage(int i) throws UnsupportedEncodingException { + return ("test_" + i).getBytes("UTF-8"); + } + + private Properties getLog4jConfig() { + Properties props = new Properties(); + props.put("log4j.rootLogger", "INFO, KAFKA"); + props.put("log4j.appender.KAFKA", "org.apache.kafka.log4jappender.MockKafkaLog4jAppender"); + props.put("log4j.appender.KAFKA.layout", "org.apache.log4j.PatternLayout"); + props.put("log4j.appender.KAFKA.layout.ConversionPattern", "%-5p: %c - %m%n"); + props.put("log4j.appender.KAFKA.BrokerList", "127.0.0.1:9093"); + props.put("log4j.appender.KAFKA.Topic", "test-topic"); + props.put("log4j.appender.KAFKA.RequiredNumAcks", "1"); + props.put("log4j.appender.KAFKA.SyncSend", "false"); + props.put("log4j.logger.kafka.log4j", "INFO, KAFKA"); + return props; + } +} + diff --git a/log4j-appender/src/test/java/org/apache/kafka/log4jappender/MockKafkaLog4jAppender.java b/log4j-appender/src/test/java/org/apache/kafka/log4jappender/MockKafkaLog4jAppender.java new file mode 100644 index 0000000..c35f26a --- /dev/null +++ b/log4j-appender/src/test/java/org/apache/kafka/log4jappender/MockKafkaLog4jAppender.java @@ -0,0 +1,47 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.kafka.log4jappender; + +import org.apache.kafka.clients.producer.MockProducer; +import org.apache.kafka.clients.producer.Producer; +import org.apache.kafka.clients.producer.ProducerRecord; +import org.apache.kafka.test.MockSerializer; +import org.apache.log4j.spi.LoggingEvent; + +import java.util.Properties; + +public class MockKafkaLog4jAppender extends KafkaLog4jAppender { + private MockProducer mockProducer = + new MockProducer(false, new MockSerializer(), new MockSerializer()); + + @Override + protected Producer getKafkaProducer(Properties props) { + return mockProducer; + } + + @Override + protected void append(LoggingEvent event) { + if (super.getProducer() == null) { + activateOptions(); + } + super.append(event); + } + + protected java.util.List> getHistory() { + return mockProducer.history(); + } +} diff --git a/scala.gradle b/scala.gradle index cabb59c..5eb2a65 100644 --- a/scala.gradle +++ b/scala.gradle @@ -1,3 +1,18 @@ +// Licensed to the Apache Software Foundation (ASF) under one or more +// contributor license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright ownership. +// The ASF licenses this file to You under the Apache License, Version 2.0 +// (the "License"); you may not use this file except in compliance with +// the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + if (!hasProperty('scalaVersion')) { ext.scalaVersion = '2.10.5' } diff --git a/settings.gradle b/settings.gradle index 83f764e..3b6a952 100644 --- a/settings.gradle +++ b/settings.gradle @@ -14,4 +14,4 @@ // limitations under the License. 
apply from: file('scala.gradle') -include 'core', 'contrib:hadoop-consumer', 'contrib:hadoop-producer', 'examples', 'clients' +include 'core', 'contrib:hadoop-consumer', 'contrib:hadoop-producer', 'examples', 'clients', 'log4j-appender' diff --git a/topics.json b/topics.json deleted file mode 100644 index ff011ed..0000000 --- a/topics.json +++ /dev/null @@ -1,4 +0,0 @@ -{"topics": - [{"topic": "foo"}], - "version":1 - } diff --git a/vagrant/broker.sh b/vagrant/broker.sh index 63f2d4f..bc040c9 100644 --- a/vagrant/broker.sh +++ b/vagrant/broker.sh @@ -22,6 +22,7 @@ set -e BROKER_ID=$1 PUBLIC_ADDRESS=$2 PUBLIC_ZOOKEEPER_ADDRESSES=$3 +JMX_PORT=$4 cd /opt/kafka @@ -35,4 +36,8 @@ echo "Killing server" bin/kafka-server-stop.sh || true sleep 5 # Because kafka-server-stop.sh doesn't actually wait echo "Starting server" +if [[ -n $JMX_PORT ]]; then + export JMX_PORT=$JMX_PORT + export KAFKA_JMX_OPTS="-Djava.rmi.server.hostname=$PUBLIC_ADDRESS -Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false " +fi bin/kafka-server-start.sh /opt/kafka/config/server-$BROKER_ID.properties 1>> /tmp/broker.log 2>> /tmp/broker.log & diff --git a/vagrant/zk.sh b/vagrant/zk.sh index 15517f8..6fc4b7c 100644 --- a/vagrant/zk.sh +++ b/vagrant/zk.sh @@ -21,6 +21,7 @@ set -e ZKID=$1 NUM_ZK=$2 +JMX_PORT=$3 cd /opt/kafka @@ -37,6 +38,10 @@ echo "$ZKID" > /tmp/zookeeper/myid echo "Killing ZooKeeper" bin/zookeeper-server-stop.sh || true -sleep 5 # Because kafka-server-stop.sh doesn't actually wait +sleep 5 # Because zookeeper-server-stop.sh doesn't actually wait echo "Starting ZooKeeper" +if [[ -n $JMX_PORT ]]; then + export JMX_PORT=$JMX_PORT + export KAFKA_JMX_OPTS="-Djava.rmi.server.hostname=zk$ZKID -Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false " +fi bin/zookeeper-server-start.sh config/zookeeper-$ZKID.properties 1>> /tmp/zk.log 2>> /tmp/zk.log & -- 2.4.6 From 737813f08ed60e9bd39d5b2baa0bea1b624d6fa6 Mon Sep 17 00:00:00 2001 From: Sriharsha Chintalapani Date: Fri, 17 Jul 2015 20:31:51 -0700 Subject: [PATCH 21/30] KAFKA-1690. Added SSL Consumer Test. 
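
Reviewer note: the new SSLConsumerTest wires up its clients through TestUtils helpers, so the actual SSL configuration is easy to miss in the diff. Below is a minimal, self-contained sketch of the client-side setup the test exercises. It is illustrative only: the bootstrap address, truststore path, password and topic name are placeholders, and the security.protocol / ssl.* property names assume the CommonClientConfigs and SSLConfigs keys introduced earlier in this series.

import java.util.Properties;

import org.apache.kafka.clients.consumer.KafkaConsumer;

public class SSLConsumerSketch {
    public static void main(String[] args) {
        Properties props = new Properties();
        // Placeholder SSL listener; the test derives this via TestUtils.getSSLBrokerListStrFromServers.
        props.put("bootstrap.servers", "localhost:9093");
        props.put("group.id", "my-test");
        props.put("key.deserializer", "org.apache.kafka.common.serialization.ByteArrayDeserializer");
        props.put("value.deserializer", "org.apache.kafka.common.serialization.ByteArrayDeserializer");
        // Switch the client onto the SSL transport added by this series.
        props.put("security.protocol", "SSL");
        // Placeholder truststore; the test generates a temporary one via TestSSLUtils.
        props.put("ssl.truststore.location", "/tmp/truststore.jks");
        props.put("ssl.truststore.password", "truststore-password");

        KafkaConsumer<byte[], byte[]> consumer = new KafkaConsumer<byte[], byte[]>(props);
        consumer.subscribe("topic");   // same subscribe-by-topic call SSLConsumerTest uses
        consumer.poll(50);             // records are fetched over the SSL channel
        consumer.close();
    }
}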
--- .../kafka/common/network/SSLTransportLayer.java | 30 +-- .../org/apache/kafka/common/network/Selector.java | 8 +- .../main/scala/kafka/network/SocketServer.scala | 7 +- .../integration/kafka/api/SSLConsumerTest.scala | 278 +++++++++++++++++++++ .../test/scala/unit/kafka/utils/TestUtils.scala | 21 +- 5 files changed, 321 insertions(+), 23 deletions(-) create mode 100644 core/src/test/scala/integration/kafka/api/SSLConsumerTest.scala diff --git a/clients/src/main/java/org/apache/kafka/common/network/SSLTransportLayer.java b/clients/src/main/java/org/apache/kafka/common/network/SSLTransportLayer.java index f644d44..bf59292 100644 --- a/clients/src/main/java/org/apache/kafka/common/network/SSLTransportLayer.java +++ b/clients/src/main/java/org/apache/kafka/common/network/SSLTransportLayer.java @@ -125,23 +125,25 @@ public class SSLTransportLayer implements TransportLayer { if (closing) return; closing = true; sslEngine.closeOutbound(); - - if (!flush(netWriteBuffer)) { - throw new IOException("Remaining data in the network buffer, can't send SSL close message."); - } - //prep the buffer for the close message - netWriteBuffer.clear(); - //perform the close, since we called sslEngine.closeOutbound - SSLEngineResult handshake = sslEngine.wrap(emptyBuf, netWriteBuffer); - //we should be in a close state - if (handshake.getStatus() != SSLEngineResult.Status.CLOSED) { - throw new IOException("Invalid close state, will not send network data."); + try { + if (!flush(netWriteBuffer)) { + throw new IOException("Remaining data in the network buffer, can't send SSL close message."); + } + //prep the buffer for the close message + netWriteBuffer.clear(); + //perform the close, since we called sslEngine.closeOutbound + SSLEngineResult handshake = sslEngine.wrap(emptyBuf, netWriteBuffer); + //we should be in a close state + if (handshake.getStatus() != SSLEngineResult.Status.CLOSED) { + throw new IOException("Invalid close state, will not send network data."); + } + netWriteBuffer.flip(); + flush(netWriteBuffer); + } catch (IOException ie) { + log.warn("Failed to send SSL Close message ", ie); } - netWriteBuffer.flip(); - flush(netWriteBuffer); socketChannel.socket().close(); socketChannel.close(); - closed = !netWriteBuffer.hasRemaining() && (handshake.getHandshakeStatus() != HandshakeStatus.NEED_WRAP); } /** diff --git a/clients/src/main/java/org/apache/kafka/common/network/Selector.java b/clients/src/main/java/org/apache/kafka/common/network/Selector.java index ba132ed..207a089 100644 --- a/clients/src/main/java/org/apache/kafka/common/network/Selector.java +++ b/clients/src/main/java/org/apache/kafka/common/network/Selector.java @@ -283,7 +283,7 @@ public class Selector implements Selectable { } } catch (InvalidReceiveException e) { log.error("Invalid data received from " + channel.id() + " closing connection", e); - close(channel.id()); + close(channel); this.disconnected.add(channel.id()); throw e; } @@ -300,7 +300,7 @@ public class Selector implements Selectable { /* cancel any defunct sockets */ if (!key.isValid()) { - close(channel(key)); + close(channel); this.disconnected.add(channel.id()); } } catch (IOException e) { @@ -309,7 +309,7 @@ public class Selector implements Selectable { log.debug("Connection {} disconnected", desc); else log.warn("Error in I/O with connection to {}", desc, e); - close(channel(key)); + close(channel); this.disconnected.add(channel.id()); } } @@ -434,12 +434,12 @@ public class Selector implements Selectable { * Begin closing this connection */ private void 
close(Channel channel) { - this.channels.remove(channel.id()); try { channel.close(); } catch (IOException e) { log.error("Exception closing connection to node {}:", channel.id(), e); } + this.channels.remove(channel.id()); this.sensors.connectionClosed.record(); } diff --git a/core/src/main/scala/kafka/network/SocketServer.scala b/core/src/main/scala/kafka/network/SocketServer.scala index fd4b667..77ae358 100644 --- a/core/src/main/scala/kafka/network/SocketServer.scala +++ b/core/src/main/scala/kafka/network/SocketServer.scala @@ -194,8 +194,9 @@ private[kafka] abstract class AbstractServerThread(connectionQuotas: ConnectionQ } def close(channel: SocketChannel) { + if(channel != null) { - debug("Closing connection from " + channel.socket.getRemoteSocketAddress()) + println("Closing connection from " + channel.socket.getRemoteSocketAddress()) connectionQuotas.dec(channel.socket.getInetAddress) swallowError(channel.socket().close()) swallowError(channel.close()) @@ -278,8 +279,8 @@ private[kafka] class Acceptor(val host: String, throw new IllegalStateException("Unrecognized key state for acceptor thread.") // round robin to the next processor thread - currentProcessor = (currentProcessor + 1) % processorEndIndex - if (currentProcessor < processorBeginIndex) currentProcessor = processorEndIndex + currentProcessor = (currentProcessor + 1) % (processorEndIndex - 1) + if (currentProcessor < processorBeginIndex) currentProcessor = processorBeginIndex } catch { case e: Throwable => error("Error while accepting connection", e) } diff --git a/core/src/test/scala/integration/kafka/api/SSLConsumerTest.scala b/core/src/test/scala/integration/kafka/api/SSLConsumerTest.scala new file mode 100644 index 0000000..cc342a9 --- /dev/null +++ b/core/src/test/scala/integration/kafka/api/SSLConsumerTest.scala @@ -0,0 +1,278 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE + * file distributed with this work for additional information regarding copyright ownership. The ASF licenses this file + * to You under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the + * License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on + * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. 
+ */ +package kafka.api + +import java.util.Properties +import java.io.File + +import org.apache.kafka.clients.producer.ProducerConfig +import org.apache.kafka.clients.producer.ProducerRecord +import org.apache.kafka.clients.producer.KafkaProducer +import org.apache.kafka.clients.consumer.Consumer +import org.apache.kafka.clients.consumer.ConsumerConfig +import org.apache.kafka.clients.consumer.KafkaConsumer +import org.apache.kafka.clients.consumer.ConsumerRebalanceCallback +import org.apache.kafka.clients.consumer.ConsumerRecord +import org.apache.kafka.clients.consumer.ConsumerConfig +import org.apache.kafka.clients.consumer.CommitType +import org.apache.kafka.common.serialization.ByteArrayDeserializer +import org.apache.kafka.common.TopicPartition +import org.apache.kafka.clients.consumer.NoOffsetForPartitionException +import kafka.integration.KafkaServerTestHarness + +import kafka.utils.{TestUtils, Logging} +import kafka.server.KafkaConfig + +import java.util.ArrayList +import org.junit.Assert._ + +import scala.collection.mutable.Buffer +import scala.collection.JavaConversions._ +import kafka.coordinator.ConsumerCoordinator + + +/** + * Integration tests for the new consumer that cover basic usage as well as server failures + */ +class SSLConsumerTest extends KafkaServerTestHarness with Logging { + + val trustStoreFile = File.createTempFile("truststore", ".jks") + val numServers = 3 + val producerCount = 1 + val consumerCount = 2 + val producerConfig = new Properties + val consumerConfig = new Properties + + val overridingProps = new Properties() + overridingProps.put(KafkaConfig.NumPartitionsProp, 4.toString) + overridingProps.put(KafkaConfig.ControlledShutdownEnableProp, "false") // speed up shutdown + overridingProps.put(KafkaConfig.OffsetsTopicReplicationFactorProp, "3") // don't want to lose offset + overridingProps.put(KafkaConfig.OffsetsTopicPartitionsProp, "1") + overridingProps.put(KafkaConfig.ConsumerMinSessionTimeoutMsProp, "100") // set small enough session timeout + + var consumers = Buffer[KafkaConsumer[Array[Byte], Array[Byte]]]() + var producers = Buffer[KafkaProducer[Array[Byte], Array[Byte]]]() + + def generateConfigs() = + TestUtils.createBrokerConfigs(numServers, zkConnect, false, enableSSL=true, trustStoreFile=Some(trustStoreFile)).map(KafkaConfig.fromProps(_, overridingProps)) + + val topic = "topic" + val part = 0 + val tp = new TopicPartition(topic, part) + + // configure the servers and clients + this.producerConfig.setProperty(ProducerConfig.ACKS_CONFIG, "all") + this.consumerConfig.setProperty(ConsumerConfig.GROUP_ID_CONFIG, "my-test") + this.consumerConfig.setProperty(ConsumerConfig.MAX_PARTITION_FETCH_BYTES_CONFIG, 4096.toString) + this.consumerConfig.setProperty(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest") + + override def setUp() { + super.setUp() + producerConfig.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, TestUtils.getSSLBrokerListStrFromServers(servers)) + producerConfig.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, classOf[org.apache.kafka.common.serialization.ByteArraySerializer]) + producerConfig.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, classOf[org.apache.kafka.common.serialization.ByteArraySerializer]) + consumerConfig.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, TestUtils.getSSLBrokerListStrFromServers(servers)) + consumerConfig.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, classOf[org.apache.kafka.common.serialization.ByteArrayDeserializer]) + consumerConfig.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, 
classOf[org.apache.kafka.common.serialization.ByteArrayDeserializer]) + consumerConfig.put(ConsumerConfig.PARTITION_ASSIGNMENT_STRATEGY_CONFIG, "range") + + for(i <- 0 until producerCount) + producers += TestUtils.createNewProducer(TestUtils.getSSLBrokerListStrFromServers(servers), + acks = 1, + enableSSL=true, + trustStoreFile=Some(trustStoreFile)) + for(i <- 0 until consumerCount) + consumers += TestUtils.createNewConsumer(TestUtils.getSSLBrokerListStrFromServers(servers), + groupId = "my-test", + partitionAssignmentStrategy= "range", + enableSSL=true, + trustStoreFile=Some(trustStoreFile)) + + + // create the consumer offset topic + TestUtils.createTopic(zkClient, ConsumerCoordinator.OffsetsTopicName, + overridingProps.getProperty(KafkaConfig.OffsetsTopicPartitionsProp).toInt, + overridingProps.getProperty(KafkaConfig.OffsetsTopicReplicationFactorProp).toInt, + servers, + servers(0).consumerCoordinator.offsetsTopicConfigs) + + // create the test topic with all the brokers as replicas + TestUtils.createTopic(this.zkClient, topic, 1, numServers, this.servers) + } + + override def tearDown() { + producers.foreach(_.close()) + consumers.foreach(_.close()) + super.tearDown() + } + + + def testSimpleConsumption() { + val numRecords = 10000 + sendRecords(numRecords) + assertEquals(0, this.consumers(0).subscriptions.size) + this.consumers(0).subscribe(tp) + assertEquals(1, this.consumers(0).subscriptions.size) + this.consumers(0).seek(tp, 0) + consumeRecords(this.consumers(0), numRecords = numRecords, startingOffset = 0) + } + + def testAutoOffsetReset() { + sendRecords(1) + this.consumers(0).subscribe(tp) + consumeRecords(this.consumers(0), numRecords = 1, startingOffset = 0) + } + + def testSeek() { + val consumer = this.consumers(0) + val totalRecords = 50L + sendRecords(totalRecords.toInt) + consumer.subscribe(tp) + + consumer.seekToEnd(tp) + assertEquals(totalRecords, consumer.position(tp)) + assertFalse(consumer.poll(totalRecords).iterator().hasNext) + + consumer.seekToBeginning(tp) + assertEquals(0, consumer.position(tp), 0) + consumeRecords(consumer, numRecords = 1, startingOffset = 0) + + val mid = totalRecords / 2 + consumer.seek(tp, mid) + assertEquals(mid, consumer.position(tp)) + consumeRecords(consumer, numRecords = 1, startingOffset = mid.toInt) + } + + def testGroupConsumption() { + sendRecords(10) + this.consumers(0).subscribe(topic) + consumeRecords(this.consumers(0), numRecords = 1, startingOffset = 0) + } + + def testPositionAndCommit() { + sendRecords(5) + + // committed() on a partition with no committed offset throws an exception + intercept[NoOffsetForPartitionException] { + this.consumers(0).committed(new TopicPartition(topic, 15)) + } + + // position() on a partition that we aren't subscribed to throws an exception + intercept[IllegalArgumentException] { + this.consumers(0).position(new TopicPartition(topic, 15)) + } + + this.consumers(0).subscribe(tp) + + assertEquals("position() on a partition that we are subscribed to should reset the offset", 0L, this.consumers(0).position(tp)) + this.consumers(0).commit(CommitType.SYNC) + assertEquals(0L, this.consumers(0).committed(tp)) + + consumeRecords(this.consumers(0), 5, 0) + assertEquals("After consuming 5 records, position should be 5", 5L, this.consumers(0).position(tp)) + this.consumers(0).commit(CommitType.SYNC) + assertEquals("Committed offset should be returned", 5L, this.consumers(0).committed(tp)) + + sendRecords(1) + + // another consumer in the same group should get the same position + 
this.consumers(1).subscribe(tp) + consumeRecords(this.consumers(1), 1, 5) + } + + def testPartitionsFor() { + val numParts = 2 + TestUtils.createTopic(this.zkClient, "part-test", numParts, 1, this.servers) + val parts = this.consumers(0).partitionsFor("part-test") + assertNotNull(parts) + assertEquals(2, parts.length) + assertNull(this.consumers(0).partitionsFor("non-exist-topic")) + } + + def testPartitionReassignmentCallback() { + val callback = new TestConsumerReassignmentCallback() + val consumer0 = TestUtils.createNewConsumer(TestUtils.getSSLBrokerListStrFromServers(servers), + groupId = "my-test", + partitionAssignmentStrategy= "range", + sessionTimeout = 200, + callback = Some(callback), + enableSSL=true, + trustStoreFile=Some(trustStoreFile)) + consumer0.subscribe(topic) + // the initial subscription should cause a callback execution + while(callback.callsToAssigned == 0) + consumer0.poll(50) + + // get metadata for the topic + var parts = consumer0.partitionsFor(ConsumerCoordinator.OffsetsTopicName) + while(parts == null) + parts = consumer0.partitionsFor(ConsumerCoordinator.OffsetsTopicName) + assertEquals(1, parts.size) + assertNotNull(parts(0).leader()) + + // shutdown the coordinator + val coordinator = parts(0).leader().id() + this.servers(coordinator).shutdown() + + // this should cause another callback execution + while(callback.callsToAssigned < 2) + consumer0.poll(50) + assertEquals(2, callback.callsToAssigned) + assertEquals(2, callback.callsToRevoked) + + consumer0.close() + } + + + private class TestConsumerReassignmentCallback extends ConsumerRebalanceCallback { + var callsToAssigned = 0 + var callsToRevoked = 0 + def onPartitionsAssigned(consumer: Consumer[_,_], partitions: java.util.Collection[TopicPartition]) { + info("onPartitionsAssigned called.") + callsToAssigned += 1 + } + def onPartitionsRevoked(consumer: Consumer[_,_], partitions: java.util.Collection[TopicPartition]) { + info("onPartitionsRevoked called.") + callsToRevoked += 1 + } + } + + private def sendRecords(numRecords: Int) { + val futures = (0 until numRecords).map { i => + this.producers(0).send(new ProducerRecord(topic, part, i.toString.getBytes, i.toString.getBytes)) + } + futures.map(_.get) + } + + private def consumeRecords(consumer: Consumer[Array[Byte], Array[Byte]], numRecords: Int, startingOffset: Int) { + val records = new ArrayList[ConsumerRecord[Array[Byte], Array[Byte]]]() + val maxIters = numRecords * 300 + var iters = 0 + while (records.size < numRecords) { + for (record <- consumer.poll(50)) { + records.add(record) + } + if(iters > maxIters) + throw new IllegalStateException("Failed to consume the expected records after " + iters + " iterations.") + iters += 1 + } + for (i <- 0 until numRecords) { + val record = records.get(i) + val offset = startingOffset + i + assertEquals(topic, record.topic()) + assertEquals(part, record.partition()) + assertEquals(offset.toLong, record.offset()) + } + } + +} diff --git a/core/src/test/scala/unit/kafka/utils/TestUtils.scala b/core/src/test/scala/unit/kafka/utils/TestUtils.scala index 7d5cbd1..e1d187a 100755 --- a/core/src/test/scala/unit/kafka/utils/TestUtils.scala +++ b/core/src/test/scala/unit/kafka/utils/TestUtils.scala @@ -49,6 +49,8 @@ import junit.framework.AssertionFailedError import junit.framework.Assert._ import org.apache.kafka.clients.producer.KafkaProducer import org.apache.kafka.clients.consumer.KafkaConsumer +import org.apache.kafka.clients.consumer.ConsumerRebalanceCallback +import 
org.apache.kafka.common.serialization.ByteArrayDeserializer import org.apache.kafka.clients.CommonClientConfigs import org.apache.kafka.common.network.SSLFactory import org.apache.kafka.common.config.SSLConfigs @@ -430,7 +432,12 @@ object TestUtils extends Logging { def createNewConsumer(brokerList: String, groupId: String, autoOffsetReset: String = "earliest", - partitionFetchSize: Long = 4096L) : KafkaConsumer[Array[Byte],Array[Byte]] = { + partitionFetchSize: Long = 4096L, + partitionAssignmentStrategy: String = "blah", + sessionTimeout: Int = 30000, + callback: Option[ConsumerRebalanceCallback] = None, + enableSSL: Boolean = false, + trustStoreFile: Option[File] = None) : KafkaConsumer[Array[Byte],Array[Byte]] = { import org.apache.kafka.clients.consumer.ConsumerConfig val consumerProps= new Properties() @@ -442,7 +449,17 @@ object TestUtils extends Logging { consumerProps.put(ConsumerConfig.RECONNECT_BACKOFF_MS_CONFIG, "200") consumerProps.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.ByteArrayDeserializer") consumerProps.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.ByteArrayDeserializer") - new KafkaConsumer[Array[Byte],Array[Byte]](consumerProps) + consumerProps.put(ConsumerConfig.PARTITION_ASSIGNMENT_STRATEGY_CONFIG, partitionAssignmentStrategy) + consumerProps.put(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, sessionTimeout.toString) + if (enableSSL) { + consumerProps.put(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG, "SSL") + consumerProps.putAll(addSSLConfigs(SSLFactory.Mode.CLIENT, false, trustStoreFile, "consumer")) + } + if (callback.isDefined) { + new KafkaConsumer[Array[Byte],Array[Byte]](consumerProps, callback.get, new ByteArrayDeserializer(), new ByteArrayDeserializer()) + } else { + new KafkaConsumer[Array[Byte],Array[Byte]](consumerProps) + } } /** -- 2.4.6 From ac957b562fccb1c1e9868e7fea67828f3e0d76b9 Mon Sep 17 00:00:00 2001 From: Sriharsha Chintalapani Date: Sat, 18 Jul 2015 14:01:04 -0700 Subject: [PATCH 22/30] KAFKA-1690. SSL Support. 
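
Reviewer note: besides renaming Channel to KafkaChannel, this patch changes how channels are assembled. Authenticator implementations are now created with a no-argument constructor and wired up through configure() instead of taking the transport layer in their constructor. The sketch below mirrors the buildChannel(...) flow shown in the diff (PlainTextChannelBuilder variant); the id, key, principal builder and size arguments are placeholders that the Selector normally supplies.

import java.io.IOException;
import java.nio.channels.SelectionKey;

import org.apache.kafka.common.network.Authenticator;
import org.apache.kafka.common.network.DefaultAuthenticator;
import org.apache.kafka.common.network.KafkaChannel;
import org.apache.kafka.common.network.PlainTextTransportLayer;
import org.apache.kafka.common.security.auth.PrincipalBuilder;

public class ChannelWiringSketch {
    // Transport layer first, then a no-arg Authenticator configured against it,
    // then the KafkaChannel wrapper that Selector drives via prepare()/ready().
    public static KafkaChannel build(String id, SelectionKey key,
                                     PrincipalBuilder principalBuilder,
                                     int maxReceiveSize) throws IOException {
        PlainTextTransportLayer transportLayer = new PlainTextTransportLayer(key);
        Authenticator authenticator = new DefaultAuthenticator();
        authenticator.configure(transportLayer, principalBuilder);
        return new KafkaChannel(id, transportLayer, authenticator, maxReceiveSize);
    }
}

As in the previous Channel class, Selector.poll() keeps calling prepare() until the transport-layer handshake and authenticate() have both completed, and ready() gates sends and receives.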
--- .../java/org/apache/kafka/clients/ClientUtils.java | 4 +- .../org/apache/kafka/common/config/SSLConfigs.java | 13 +- .../apache/kafka/common/network/Authenticator.java | 26 ++-- .../org/apache/kafka/common/network/Channel.java | 154 -------------------- .../kafka/common/network/ChannelBuilder.java | 2 +- .../kafka/common/network/DefaultAuthenticator.java | 4 +- .../apache/kafka/common/network/KafkaChannel.java | 158 +++++++++++++++++++++ .../common/network/PlainTextChannelBuilder.java | 9 +- .../common/network/PlainTextTransportLayer.java | 4 + .../kafka/common/network/SSLChannelBuilder.java | 9 +- .../apache/kafka/common/network/SSLFactory.java | 12 +- .../kafka/common/network/SSLTransportLayer.java | 9 +- .../org/apache/kafka/common/network/Selector.java | 40 +++--- .../kafka/common/network/TransportLayer.java | 7 +- .../main/scala/kafka/network/SocketServer.scala | 32 ++--- core/src/main/scala/kafka/server/KafkaConfig.scala | 22 ++- core/src/main/scala/kafka/server/KafkaServer.scala | 15 +- .../unit/kafka/network/SocketServerTest.scala | 28 +++- .../kafka/server/KafkaConfigConfigDefTest.scala | 6 +- 19 files changed, 302 insertions(+), 252 deletions(-) delete mode 100644 clients/src/main/java/org/apache/kafka/common/network/Channel.java create mode 100644 clients/src/main/java/org/apache/kafka/common/network/KafkaChannel.java diff --git a/clients/src/main/java/org/apache/kafka/clients/ClientUtils.java b/clients/src/main/java/org/apache/kafka/clients/ClientUtils.java index d70ad33..33cb967 100644 --- a/clients/src/main/java/org/apache/kafka/clients/ClientUtils.java +++ b/clients/src/main/java/org/apache/kafka/clients/ClientUtils.java @@ -79,8 +79,10 @@ public class ClientUtils { if (securityProtocol == SecurityProtocol.SSL) { channelBuilder = new SSLChannelBuilder(SSLFactory.Mode.CLIENT); - } else { + } else if (securityProtocol == SecurityProtocol.PLAINTEXT) { channelBuilder = new PlainTextChannelBuilder(); + } else { + throw new ConfigException("Invalid SecurityProtocol " + CommonClientConfigs.SECURITY_PROTOCOL_CONFIG); } channelBuilder.configure(configs); diff --git a/clients/src/main/java/org/apache/kafka/common/config/SSLConfigs.java b/clients/src/main/java/org/apache/kafka/common/config/SSLConfigs.java index e861358..a02cf0c 100644 --- a/clients/src/main/java/org/apache/kafka/common/config/SSLConfigs.java +++ b/clients/src/main/java/org/apache/kafka/common/config/SSLConfigs.java @@ -85,8 +85,17 @@ public class SSLConfigs { public static final String SSL_ENDPOINT_IDENTIFICATION_ALGORITHM_CONFIG = "ssl.endpoint.identification.algorithm"; public static final String SSL_ENDPOINT_IDENTIFICATION_ALGORITHM_DOC = "The endpoint identification algorithm to validate server hostname using server certificate. "; - public static final String SSL_NEED_CLIENT_AUTH_CONFIG = "ssl.need.client.auth"; - public static final String SSL_NEED_CLIENT_AUTH_DOC = "If set to true kafka broker requires all the ssl client connecting to provide client authentication. " + public static final String SSL_CLIENT_AUTH_CONFIG = "ssl.client.auth"; + public static final String SSL_CLIENT_AUTH_DOC = "Configures kafka broker to request client authentication." + + " The following settings are common: " + + "
        " + + "
      • ssl.client.auth=required If set to required," +
+                                               " client authentication is required." +
+                                               "
      • ssl.client.auth=requested This means client authentication is optional." +
+                                               " Unlike required, if this option is set the client can choose not to provide authentication information about itself." +
+                                               "
      • ssl.client.auth=none This means client authentication is not needed."; + + public static final String SSL_NEED_CLIENT_AUTH_DOC = "It can be REQUESTED . " + "Default value is false"; public static final Boolean DEFAULT_SSL_NEED_CLIENT_AUTH = false; diff --git a/clients/src/main/java/org/apache/kafka/common/network/Authenticator.java b/clients/src/main/java/org/apache/kafka/common/network/Authenticator.java index b3f574b..261f571 100644 --- a/clients/src/main/java/org/apache/kafka/common/network/Authenticator.java +++ b/clients/src/main/java/org/apache/kafka/common/network/Authenticator.java @@ -24,21 +24,17 @@ package org.apache.kafka.common.network; import java.io.IOException; import java.security.Principal; +import org.apache.kafka.common.security.auth.PrincipalBuilder; import org.apache.kafka.common.KafkaException; public interface Authenticator { /** - * Closes this Authenticator - * - * @throws IOException if any I/O error occurs - */ - void close() throws IOException; - - /** - * Returns Principal after authentication is established + * configures Authenticator using principalbuilder and transportLayer. + * @param TransportLayer transportLayer + * @param PrincipalBuilder principalBuilder */ - Principal principal() throws KafkaException; + void configure(TransportLayer transportLayer, PrincipalBuilder principalBuilder); /** * Implements any authentication mechanism. Use transportLayer to read or write tokens. @@ -47,8 +43,20 @@ public interface Authenticator { void authenticate() throws IOException; /** + * Returns Principal using PrincipalBuilder + */ + Principal principal() throws KafkaException; + + /** * returns true if authentication is complete otherwise returns false; */ boolean complete(); + /** + * Closes this Authenticator + * + * @throws IOException if any I/O error occurs + */ + void close() throws IOException; + } diff --git a/clients/src/main/java/org/apache/kafka/common/network/Channel.java b/clients/src/main/java/org/apache/kafka/common/network/Channel.java deleted file mode 100644 index 0085402..0000000 --- a/clients/src/main/java/org/apache/kafka/common/network/Channel.java +++ /dev/null @@ -1,154 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.kafka.common.network; - - -import java.io.IOException; - -import java.net.Socket; -import java.nio.channels.SelectionKey; - -import java.security.Principal; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - - -public class Channel { - private static final Logger log = LoggerFactory.getLogger(Channel.class); - private final String id; - public TransportLayer transportLayer; - private Authenticator authenticator; - private NetworkReceive receive; - private Send send; - private int maxReceiveSize; - - public Channel(String id, TransportLayer transportLayer, Authenticator authenticator, int maxReceiveSize) throws IOException { - this.id = id; - this.transportLayer = transportLayer; - this.authenticator = authenticator; - this.maxReceiveSize = maxReceiveSize; - } - - public void close() throws IOException { - transportLayer.close(); - authenticator.close(); - } - - /** - * returns user principal for the session - * In case of PLAINTEXT and No Authentication returns ANONYMOUS as the userPrincipal - * If SSL used without any SASL Authentication returns SSLSession.peerPrincipal - */ - public Principal principal() throws IOException { - return authenticator.principal(); - } - - /** - * Does handshake of transportLayer and Authentication using configured authenticator - */ - public void prepare() throws IOException { - if (transportLayer.ready() && authenticator.complete()) - return; - if (!transportLayer.ready()) - transportLayer.handshake(); - if (transportLayer.ready() && !authenticator.complete()) - authenticator.authenticate(); - } - - public void disconnect() { - transportLayer.disconnect(); - } - - - public void finishConnect() throws IOException { - transportLayer.finishConnect(); - } - - public String id() { - return id; - } - - public void mute() { - transportLayer.removeInterestOps(SelectionKey.OP_READ); - } - - public void unmute() { - transportLayer.addInterestOps(SelectionKey.OP_READ); - } - - public boolean ready() { - return transportLayer.ready() && authenticator.complete(); - } - - public String socketDescription() { - Socket socket = transportLayer.socketChannel().socket(); - if (socket == null) - return "[unconnected socket]"; - else if (socket.getInetAddress() != null) - return socket.getInetAddress().toString(); - else - return socket.getLocalAddress().toString(); - } - - public void setSend(Send send) { - if (this.send != null) - throw new IllegalStateException("Attempt to begin a send operation with prior send operation still in progress."); - this.send = send; - this.transportLayer.addInterestOps(SelectionKey.OP_WRITE); - } - - public NetworkReceive read() throws IOException { - NetworkReceive result = null; - - if (receive == null) { - receive = new NetworkReceive(maxReceiveSize, id); - } - - long x = receive(receive); - if (receive.complete()) { - receive.payload().rewind(); - result = receive; - receive = null; - } - return result; - } - - public Send write() throws IOException { - Send result = null; - if (send != null && send(send)) { - result = send; - send = null; - } - return result; - } - - private long receive(NetworkReceive receive) throws IOException { - long result = receive.readFrom(transportLayer); - return result; - } - - private boolean send(Send send) throws IOException { - send.writeTo(transportLayer); - if (send.completed()) { - transportLayer.removeInterestOps(SelectionKey.OP_WRITE); - } - return send.completed(); - } - -} diff --git a/clients/src/main/java/org/apache/kafka/common/network/ChannelBuilder.java 
b/clients/src/main/java/org/apache/kafka/common/network/ChannelBuilder.java index e0ff4e8..2629392 100644 --- a/clients/src/main/java/org/apache/kafka/common/network/ChannelBuilder.java +++ b/clients/src/main/java/org/apache/kafka/common/network/ChannelBuilder.java @@ -33,7 +33,7 @@ public interface ChannelBuilder { * @param id channel id * @param key SelectionKey */ - public Channel buildChannel(String id, SelectionKey key, int maxReceiveSize) throws KafkaException; + public KafkaChannel buildChannel(String id, SelectionKey key, int maxReceiveSize) throws KafkaException; /** diff --git a/clients/src/main/java/org/apache/kafka/common/network/DefaultAuthenticator.java b/clients/src/main/java/org/apache/kafka/common/network/DefaultAuthenticator.java index 371c97d..813a4aa 100644 --- a/clients/src/main/java/org/apache/kafka/common/network/DefaultAuthenticator.java +++ b/clients/src/main/java/org/apache/kafka/common/network/DefaultAuthenticator.java @@ -29,7 +29,7 @@ public class DefaultAuthenticator implements Authenticator { private PrincipalBuilder principalBuilder; private Principal principal; - public DefaultAuthenticator(TransportLayer transportLayer, PrincipalBuilder principalBuilder) { + public void configure(TransportLayer transportLayer, PrincipalBuilder principalBuilder) { this.transportLayer = transportLayer; this.principalBuilder = principalBuilder; } @@ -53,7 +53,7 @@ public class DefaultAuthenticator implements Authenticator { public void close() throws IOException {} /** - * DefaultAuthenticator doesn't implement any additional authentication. + * DefaultAuthenticator doesn't implement any additional authentication mechanism. * @returns true */ public boolean complete() { diff --git a/clients/src/main/java/org/apache/kafka/common/network/KafkaChannel.java b/clients/src/main/java/org/apache/kafka/common/network/KafkaChannel.java new file mode 100644 index 0000000..72d5a75 --- /dev/null +++ b/clients/src/main/java/org/apache/kafka/common/network/KafkaChannel.java @@ -0,0 +1,158 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.kafka.common.network; + + +import java.io.IOException; + +import java.net.Socket; +import java.nio.channels.SelectionKey; + +import java.security.Principal; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + + +public class KafkaChannel { + private static final Logger log = LoggerFactory.getLogger(KafkaChannel.class); + private final String id; + public TransportLayer transportLayer; + private Authenticator authenticator; + private NetworkReceive receive; + private Send send; + private int maxReceiveSize; + + public KafkaChannel(String id, TransportLayer transportLayer, Authenticator authenticator, int maxReceiveSize) throws IOException { + this.id = id; + this.transportLayer = transportLayer; + this.authenticator = authenticator; + this.maxReceiveSize = maxReceiveSize; + } + + public void close() throws IOException { + transportLayer.close(); + authenticator.close(); + } + + /** + * returns user principal for the session + * In case of PLAINTEXT and No Authentication returns ANONYMOUS as the userPrincipal + * If SSL used without any SASL Authentication returns SSLSession.peerPrincipal + */ + public Principal principal() throws IOException { + return authenticator.principal(); + } + + /** + * Does handshake of transportLayer and Authentication using configured authenticator + */ + public void prepare() throws IOException { + if (transportLayer.ready() && authenticator.complete()) + return; + if (!transportLayer.ready()) + transportLayer.handshake(); + if (transportLayer.ready() && !authenticator.complete()) + authenticator.authenticate(); + } + + public void disconnect() { + transportLayer.disconnect(); + } + + + public void finishConnect() throws IOException { + transportLayer.finishConnect(); + } + + public boolean isConnected() { + return transportLayer.isConnected(); + } + + public String id() { + return id; + } + + public void mute() { + transportLayer.removeInterestOps(SelectionKey.OP_READ); + } + + public void unmute() { + transportLayer.addInterestOps(SelectionKey.OP_READ); + } + + public boolean ready() { + return transportLayer.ready() && authenticator.complete(); + } + + public String socketDescription() { + Socket socket = transportLayer.socketChannel().socket(); + if (socket == null) + return "[unconnected socket]"; + else if (socket.getInetAddress() != null) + return socket.getInetAddress().toString(); + else + return socket.getLocalAddress().toString(); + } + + public void setSend(Send send) { + if (this.send != null) + throw new IllegalStateException("Attempt to begin a send operation with prior send operation still in progress."); + this.send = send; + this.transportLayer.addInterestOps(SelectionKey.OP_WRITE); + } + + public NetworkReceive read() throws IOException { + NetworkReceive result = null; + + if (receive == null) { + receive = new NetworkReceive(maxReceiveSize, id); + } + + long x = receive(receive); + if (receive.complete()) { + receive.payload().rewind(); + result = receive; + receive = null; + } + return result; + } + + public Send write() throws IOException { + Send result = null; + if (send != null && send(send)) { + result = send; + send = null; + } + return result; + } + + private long receive(NetworkReceive receive) throws IOException { + long result = receive.readFrom(transportLayer); + return result; + } + + private boolean send(Send send) throws IOException { + send.writeTo(transportLayer); + if (send.completed()) { + transportLayer.removeInterestOps(SelectionKey.OP_WRITE); + } + return send.completed(); + } + +} 
diff --git a/clients/src/main/java/org/apache/kafka/common/network/PlainTextChannelBuilder.java b/clients/src/main/java/org/apache/kafka/common/network/PlainTextChannelBuilder.java index 4b9837a..a8e1bb0 100644 --- a/clients/src/main/java/org/apache/kafka/common/network/PlainTextChannelBuilder.java +++ b/clients/src/main/java/org/apache/kafka/common/network/PlainTextChannelBuilder.java @@ -37,12 +37,13 @@ public class PlainTextChannelBuilder implements ChannelBuilder { } } - public Channel buildChannel(String id, SelectionKey key, int maxReceiveSize) throws KafkaException { - Channel channel = null; + public KafkaChannel buildChannel(String id, SelectionKey key, int maxReceiveSize) throws KafkaException { + KafkaChannel channel = null; try { PlainTextTransportLayer transportLayer = new PlainTextTransportLayer(key); - Authenticator authenticator = new DefaultAuthenticator(transportLayer, this.principalBuilder); - channel = new Channel(id, transportLayer, authenticator, maxReceiveSize); + Authenticator authenticator = new DefaultAuthenticator(); + authenticator.configure(transportLayer, this.principalBuilder); + channel = new KafkaChannel(id, transportLayer, authenticator, maxReceiveSize); } catch (Exception e) { log.warn("Failed to create channel due to ", e); throw new KafkaException(e); diff --git a/clients/src/main/java/org/apache/kafka/common/network/PlainTextTransportLayer.java b/clients/src/main/java/org/apache/kafka/common/network/PlainTextTransportLayer.java index 1209670..d6e9009 100644 --- a/clients/src/main/java/org/apache/kafka/common/network/PlainTextTransportLayer.java +++ b/clients/src/main/java/org/apache/kafka/common/network/PlainTextTransportLayer.java @@ -69,6 +69,10 @@ public class PlainTextTransportLayer implements TransportLayer { return socketChannel.isOpen(); } + public boolean isConnected() { + return socketChannel.isConnected(); + } + /** * Closes this channel * diff --git a/clients/src/main/java/org/apache/kafka/common/network/SSLChannelBuilder.java b/clients/src/main/java/org/apache/kafka/common/network/SSLChannelBuilder.java index 0a74a2b..17c33c3 100644 --- a/clients/src/main/java/org/apache/kafka/common/network/SSLChannelBuilder.java +++ b/clients/src/main/java/org/apache/kafka/common/network/SSLChannelBuilder.java @@ -44,15 +44,16 @@ public class SSLChannelBuilder implements ChannelBuilder { } } - public Channel buildChannel(String id, SelectionKey key, int maxReceiveSize) throws KafkaException { - Channel channel = null; + public KafkaChannel buildChannel(String id, SelectionKey key, int maxReceiveSize) throws KafkaException { + KafkaChannel channel = null; try { SocketChannel socketChannel = (SocketChannel) key.channel(); SSLTransportLayer transportLayer = new SSLTransportLayer(key, sslFactory.createSSLEngine(socketChannel.socket().getInetAddress().getHostName(), socketChannel.socket().getPort())); - Authenticator authenticator = new DefaultAuthenticator(transportLayer, this.principalBuilder); - channel = new Channel(id, transportLayer, authenticator, maxReceiveSize); + Authenticator authenticator = new DefaultAuthenticator(); + authenticator.configure(transportLayer, this.principalBuilder); + channel = new KafkaChannel(id, transportLayer, authenticator, maxReceiveSize); } catch (Exception e) { log.info("Failed to create channel due to ", e); throw new KafkaException(e); diff --git a/clients/src/main/java/org/apache/kafka/common/network/SSLFactory.java b/clients/src/main/java/org/apache/kafka/common/network/SSLFactory.java index b843ee1..7cc0c3b 100644 --- 
a/clients/src/main/java/org/apache/kafka/common/network/SSLFactory.java +++ b/clients/src/main/java/org/apache/kafka/common/network/SSLFactory.java @@ -72,10 +72,12 @@ public class SSLFactory implements Configurable { this.endpointIdentification = (String) configs.get(SSLConfigs.SSL_ENDPOINT_IDENTIFICATION_ALGORITHM_CONFIG); } - if (configs.containsKey(SSLConfigs.SSL_NEED_CLIENT_AUTH_CONFIG)) { - this.needClientAuth = (Boolean) configs.get(SSLConfigs.SSL_NEED_CLIENT_AUTH_CONFIG); - } else if (configs.containsKey(SSLConfigs.SSL_WANT_CLIENT_AUTH_CONFIG)) { - this.wantClientAuth = (Boolean) configs.get(SSLConfigs.SSL_WANT_CLIENT_AUTH_CONFIG); + if (configs.containsKey(SSLConfigs.SSL_CLIENT_AUTH_CONFIG)) { + String clientAuthConfig = (String) configs.get(SSLConfigs.SSL_CLIENT_AUTH_CONFIG); + if (clientAuthConfig.equals("required")) + this.needClientAuth = true; + else if (clientAuthConfig.equals("requested")) + this.wantClientAuth = true; } this.kmfAlgorithm = (String) configs.get(SSLConfigs.SSL_KEYMANAGER_ALGORITHM_CONFIG); @@ -135,7 +137,7 @@ public class SSLFactory implements Configurable { if (needClientAuth) sslEngine.setNeedClientAuth(needClientAuth); else - sslEngine.setNeedClientAuth(wantClientAuth); + sslEngine.setWantClientAuth(wantClientAuth); } else { sslEngine.setUseClientMode(true); SSLParameters sslParams = sslEngine.getSSLParameters(); diff --git a/clients/src/main/java/org/apache/kafka/common/network/SSLTransportLayer.java b/clients/src/main/java/org/apache/kafka/common/network/SSLTransportLayer.java index bf59292..fe60603 100644 --- a/clients/src/main/java/org/apache/kafka/common/network/SSLTransportLayer.java +++ b/clients/src/main/java/org/apache/kafka/common/network/SSLTransportLayer.java @@ -116,6 +116,11 @@ public class SSLTransportLayer implements TransportLayer { return socketChannel.isOpen(); } + public boolean isConnected() { + return socketChannel.isConnected(); + } + + /** * Sends a SSL close message and closes socketChannel. 
* @throws IOException if an I/O error occurs @@ -561,7 +566,7 @@ public class SSLTransportLayer implements TransportLayer { } /** - * Adds interestOps to SelecitonKey of the TransportLayer + * Adds interestOps to SelectionKey of the TransportLayer * @param ops SelectionKey interestOps */ public void addInterestOps(int ops) { @@ -575,7 +580,7 @@ public class SSLTransportLayer implements TransportLayer { } /** - * removes interestOps to SelecitonKey of the TransportLayer + * removes interestOps to SelectionKey of the TransportLayer * @param ops SelectionKey interestOps */ public void removeInterestOps(int ops) { diff --git a/clients/src/main/java/org/apache/kafka/common/network/Selector.java b/clients/src/main/java/org/apache/kafka/common/network/Selector.java index 207a089..f314218 100644 --- a/clients/src/main/java/org/apache/kafka/common/network/Selector.java +++ b/clients/src/main/java/org/apache/kafka/common/network/Selector.java @@ -68,7 +68,7 @@ public class Selector implements Selectable { private static final Logger log = LoggerFactory.getLogger(Selector.class); private final java.nio.channels.Selector nioSelector; - private final Map channels; + private final Map channels; private final List completedSends; private final List completedReceives; private final List disconnected; @@ -101,7 +101,7 @@ public class Selector implements Selectable { this.time = time; this.metricGrpPrefix = metricGrpPrefix; this.metricTags = metricTags; - this.channels = new HashMap(); + this.channels = new HashMap(); this.completedSends = new ArrayList(); this.completedReceives = new ArrayList(); this.connected = new ArrayList(); @@ -154,7 +154,7 @@ public class Selector implements Selectable { throw e; } SelectionKey key = socketChannel.register(nioSelector, SelectionKey.OP_CONNECT); - Channel channel = channelBuilder.buildChannel(id, key, maxReceiveSize); + KafkaChannel channel = channelBuilder.buildChannel(id, key, maxReceiveSize); key.attach(channel); this.channels.put(id, channel); } @@ -166,7 +166,7 @@ public class Selector implements Selectable { */ public void register(String id, SocketChannel socketChannel) throws ClosedChannelException { SelectionKey key = socketChannel.register(nioSelector, SelectionKey.OP_READ); - Channel channel = channelBuilder.buildChannel(id, key, maxReceiveSize); + KafkaChannel channel = channelBuilder.buildChannel(id, key, maxReceiveSize); key.attach(channel); this.channels.put(id, channel); } @@ -177,7 +177,7 @@ public class Selector implements Selectable { */ @Override public void disconnect(String id) { - Channel channel = channelForId(id); + KafkaChannel channel = channelForId(id); if (channel != null) channel.disconnect(); } @@ -212,7 +212,7 @@ public class Selector implements Selectable { * @param send The request to send */ public void send(Send send) { - Channel channel = channelForId(send.destination()); + KafkaChannel channel = channelForId(send.destination()); if (channel == null) { throw new IllegalStateException("channel is not connected"); } @@ -254,7 +254,7 @@ public class Selector implements Selectable { while (iter.hasNext()) { SelectionKey key = iter.next(); iter.remove(); - Channel channel = channel(key); + KafkaChannel channel = channel(key); // register all per-connection metrics at once sensors.maybeRegisterConnectionMetrics(channel.id()); @@ -269,7 +269,7 @@ public class Selector implements Selectable { } /* if channel is not ready finish prepare */ - if (!channel.ready()) { + if (channel.isConnected() && !channel.ready()) { channel.prepare(); } @@ 
-341,33 +341,33 @@ public class Selector implements Selectable { @Override public void mute(String id) { - Channel channel = channelForId(id); + KafkaChannel channel = channelForId(id); mute(channel); } - private void mute(Channel channel) { + private void mute(KafkaChannel channel) { channel.mute(); } @Override public void unmute(String id) { - Channel channel = channelForId(id); + KafkaChannel channel = channelForId(id); unmute(channel); } - private void unmute(Channel channel) { + private void unmute(KafkaChannel channel) { channel.unmute(); } @Override public void muteAll() { - for (Channel channel : this.channels.values()) + for (KafkaChannel channel : this.channels.values()) mute(channel); } @Override public void unmuteAll() { - for (Channel channel : this.channels.values()) + for (KafkaChannel channel : this.channels.values()) unmute(channel); } @@ -425,7 +425,7 @@ public class Selector implements Selectable { * @param id channel id */ public void close(String id) { - Channel channel = this.channels.get(id); + KafkaChannel channel = this.channels.get(id); if (channel != null) close(channel); } @@ -433,7 +433,7 @@ public class Selector implements Selectable { /** * Begin closing this connection */ - private void close(Channel channel) { + private void close(KafkaChannel channel) { try { channel.close(); } catch (IOException e) { @@ -446,8 +446,8 @@ public class Selector implements Selectable { /** * Get the channel associated with this numeric id */ - private Channel channelForId(String id) { - Channel channel = this.channels.get(id); + private KafkaChannel channelForId(String id) { + KafkaChannel channel = this.channels.get(id); if (channel == null) throw new IllegalStateException("Attempt to write to socket for which there is no open connection. Connection id " + id + " existing connections " + channels.keySet().toString()); return channel; @@ -456,8 +456,8 @@ public class Selector implements Selectable { /** * Get the channel associated with selectionKey */ - private Channel channel(SelectionKey key) { - return (Channel) key.attachment(); + private KafkaChannel channel(SelectionKey key) { + return (KafkaChannel) key.attachment(); } diff --git a/clients/src/main/java/org/apache/kafka/common/network/TransportLayer.java b/clients/src/main/java/org/apache/kafka/common/network/TransportLayer.java index 2fa4437..411423c 100644 --- a/clients/src/main/java/org/apache/kafka/common/network/TransportLayer.java +++ b/clients/src/main/java/org/apache/kafka/common/network/TransportLayer.java @@ -31,7 +31,7 @@ import java.security.Principal; public interface TransportLayer extends ScatteringByteChannel, GatheringByteChannel { /** - * Returns true if the channel has handshake and authenticaiton done. + * Returns true if the channel has handshake and authentication done. */ boolean ready(); @@ -46,6 +46,11 @@ public interface TransportLayer extends ScatteringByteChannel, GatheringByteChan void disconnect(); /** + * Tells whether or not this channel's network socket is connected. 
+ */ + boolean isConnected(); + + /** * returns underlying socketChannel */ SocketChannel socketChannel(); diff --git a/core/src/main/scala/kafka/network/SocketServer.scala b/core/src/main/scala/kafka/network/SocketServer.scala index 77ae358..272ca27 100644 --- a/core/src/main/scala/kafka/network/SocketServer.scala +++ b/core/src/main/scala/kafka/network/SocketServer.scala @@ -46,22 +46,10 @@ import scala.collection._ * Acceptor has N Processor threads that each have their own selector and read requests from sockets * M Handler threads that handle requests and produce responses back to the processor threads for writing. */ -class SocketServer(val config: KafkaConfig) extends Logging with KafkaMetricsGroup { - - private val jmxPrefix: String = "kafka.server" - private val reporters: java.util.List[MetricsReporter] = config.metricReporterClasses - reporters.add(new JmxReporter(jmxPrefix)) - - private val metricConfig: MetricConfig = new MetricConfig() - .samples(config.metricNumSamples) - .timeWindow(config.metricSampleWindowMs, TimeUnit.MILLISECONDS) +class SocketServer(val config: KafkaConfig, val metrics: Metrics, val time: Time) extends Logging with KafkaMetricsGroup { val channelConfigs = config.channelConfigs - // This exists so SocketServer (which uses Client libraries) can use the client Time objects without having to convert all of Kafka to use them - // Once we get rid of kafka.utils.time, we can get rid of this too - private val time: org.apache.kafka.common.utils.Time = new org.apache.kafka.common.utils.SystemTime() - val endpoints = config.listeners val numProcessorThreads = config.numNetworkThreads val maxQueuedRequests = config.queuedMaxRequests @@ -105,7 +93,7 @@ class SocketServer(val config: KafkaConfig) extends Logging with KafkaMetricsGro var processorBeginIndex = 0 endpoints.values.foreach(endpoint => { val acceptor = new Acceptor(endpoint.host, endpoint.port, sendBufferSize, recvBufferSize, config.brokerId, requestChannel, processors, processorBeginIndex, numProcessorThreads, quotas, - endpoint.protocolType, portToProtocol, channelConfigs, maxQueuedRequests, maxRequestSize, connectionsMaxIdleMs, new Metrics(metricConfig, reporters, time), allMetricNames, time) + endpoint.protocolType, portToProtocol, channelConfigs, maxQueuedRequests, maxRequestSize, connectionsMaxIdleMs, metrics, allMetricNames, time) acceptors.put(endpoint, acceptor) Utils.newThread("kafka-socket-acceptor-%s-%d".format(endpoint.protocolType.toString, endpoint.port), acceptor, false).start() acceptor.awaitStartup @@ -113,6 +101,13 @@ class SocketServer(val config: KafkaConfig) extends Logging with KafkaMetricsGro }) } + newGauge("NetworkProcessorAvgIdlePercent", + new Gauge[Double] { + def value = allMetricNames.map( metricName => + metrics.metrics().get(metricName).value()).sum / totalProcessorThreads + } + ) + info("Started " + acceptors.size + " acceptor threads") } @@ -232,13 +227,6 @@ private[kafka] class Acceptor(val host: String, portToProtocol.put(serverChannel.socket().getLocalPort, protocol) - newGauge("NetworkProcessorAvgIdlePercent", - new Gauge[Double] { - def value = allMetricNames.map( metricName => - metrics.metrics().get(metricName).value()).sum / numProcessorThreads - } - ) - this.synchronized { for (i <- processorBeginIndex until processorEndIndex) { processors(i) = new Processor(i, @@ -279,7 +267,7 @@ private[kafka] class Acceptor(val host: String, throw new IllegalStateException("Unrecognized key state for acceptor thread.") // round robin to the next processor thread - 
currentProcessor = (currentProcessor + 1) % (processorEndIndex - 1) + currentProcessor = (currentProcessor + 1) % processorEndIndex if (currentProcessor < processorBeginIndex) currentProcessor = processorBeginIndex } catch { case e: Throwable => error("Error while accepting connection", e) diff --git a/core/src/main/scala/kafka/server/KafkaConfig.scala b/core/src/main/scala/kafka/server/KafkaConfig.scala index 08c1d3f..33ca15b 100755 --- a/core/src/main/scala/kafka/server/KafkaConfig.scala +++ b/core/src/main/scala/kafka/server/KafkaConfig.scala @@ -154,8 +154,10 @@ object Defaults { val SSLTruststorePassword = SSLConfigs.DEFAULT_TRUSTSTORE_PASSWORD val SSLKeyManagerAlgorithm = SSLConfigs.DEFAULT_SSL_KEYMANGER_ALGORITHM val SSLTrustManagerAlgorithm = SSLConfigs.DEFAULT_SSL_TRUSTMANAGER_ALGORITHM - val SSLNeedClientAuth = false - val SSLWantClientAuth = false + val SSLClientAuthRequired = "required" + val SSLClientAuthRequested = "requested" + val SSLClientAuthNone = "none" + val SSLClientAuth = SSLClientAuthNone } @@ -292,8 +294,7 @@ object KafkaConfig { val SSLKeyManagerAlgorithmProp = SSLConfigs.SSL_KEYMANAGER_ALGORITHM_CONFIG val SSLTrustManagerAlgorithmProp = SSLConfigs.SSL_TRUSTMANAGER_ALGORITHM_CONFIG val SSLEndpointIdentificationAlgorithmProp = SSLConfigs.SSL_ENDPOINT_IDENTIFICATION_ALGORITHM_CONFIG - val SSLNeedClientAuthProp = SSLConfigs.SSL_NEED_CLIENT_AUTH_CONFIG - val SSLWantClientAuthProp = SSLConfigs.SSL_WANT_CLIENT_AUTH_CONFIG + val SSLClientAuthProp = SSLConfigs.SSL_CLIENT_AUTH_CONFIG /* Documentation */ @@ -450,9 +451,7 @@ object KafkaConfig { val SSLKeyManagerAlgorithmDoc = SSLConfigs.SSL_KEYMANAGER_ALGORITHM_DOC val SSLTrustManagerAlgorithmDoc = SSLConfigs.SSL_TRUSTMANAGER_ALGORITHM_DOC val SSLEndpointIdentificationAlgorithmDoc = SSLConfigs.SSL_ENDPOINT_IDENTIFICATION_ALGORITHM_DOC - val SSLNeedClientAuthDoc = SSLConfigs.SSL_NEED_CLIENT_AUTH_DOC - val SSLWantClientAuthDoc = SSLConfigs.SSL_WANT_CLIENT_AUTH_DOC - + val SSLClientAuthDoc = SSLConfigs.SSL_CLIENT_AUTH_DOC private val configDef = { import ConfigDef.Range._ @@ -592,8 +591,7 @@ object KafkaConfig { .define(SSLTruststorePasswordProp, STRING, Defaults.SSLTruststorePassword, MEDIUM, SSLTruststorePasswordDoc) .define(SSLKeyManagerAlgorithmProp, STRING, Defaults.SSLKeyManagerAlgorithm, MEDIUM, SSLKeyManagerAlgorithmDoc) .define(SSLTrustManagerAlgorithmProp, STRING, Defaults.SSLTrustManagerAlgorithm, MEDIUM, SSLTrustManagerAlgorithmDoc) - .define(SSLNeedClientAuthProp, BOOLEAN, Defaults.SSLNeedClientAuth, MEDIUM, SSLNeedClientAuthDoc) - .define(SSLWantClientAuthProp, BOOLEAN, Defaults.SSLWantClientAuth, MEDIUM, SSLWantClientAuthDoc) + .define(SSLClientAuthProp, STRING, Defaults.SSLClientAuth, in(Defaults.SSLClientAuthRequired, Defaults.SSLClientAuthRequested, Defaults.SSLClientAuthNone), MEDIUM, SSLClientAuthDoc) } def configNames() = { @@ -747,8 +745,7 @@ case class KafkaConfig (props: java.util.Map[_, _]) extends AbstractConfig(Kafka val sslTruststorePassword = getString(KafkaConfig.SSLTruststorePasswordProp) val sslKeyManagerAlgorithm = getString(KafkaConfig.SSLKeyManagerAlgorithmProp) val sslTrustManagerAlgorithm = getString(KafkaConfig.SSLTrustManagerAlgorithmProp) - val sslNeedClientAuth = getBoolean(KafkaConfig.SSLNeedClientAuthProp) - val sslWantClientAuth = getBoolean(KafkaConfig.SSLWantClientAuthProp) + val sslClientAuth = getString(KafkaConfig.SSLClientAuthProp) val deleteTopicEnable = getBoolean(KafkaConfig.DeleteTopicEnableProp) val compressionType = getString(KafkaConfig.CompressionTypeProp) @@ -880,8 
+877,7 @@ case class KafkaConfig (props: java.util.Map[_, _]) extends AbstractConfig(Kafka channelConfigs.put(SSLTruststorePasswordProp, sslTruststorePassword) channelConfigs.put(SSLKeyManagerAlgorithmProp, sslKeyManagerAlgorithm) channelConfigs.put(SSLTrustManagerAlgorithmProp, sslTrustManagerAlgorithm) - channelConfigs.put(SSLNeedClientAuthProp, sslNeedClientAuth: java.lang.Boolean) - channelConfigs.put(SSLWantClientAuthProp, sslWantClientAuth: java.lang.Boolean) + channelConfigs.put(SSLClientAuthProp, sslClientAuth) channelConfigs } diff --git a/core/src/main/scala/kafka/server/KafkaServer.scala b/core/src/main/scala/kafka/server/KafkaServer.scala index 76ac548..b011466 100755 --- a/core/src/main/scala/kafka/server/KafkaServer.scala +++ b/core/src/main/scala/kafka/server/KafkaServer.scala @@ -55,6 +55,18 @@ class KafkaServer(val config: KafkaConfig, time: Time = SystemTime) extends Logg private var shutdownLatch = new CountDownLatch(1) + private val jmxPrefix: String = "kafka.server" + private val reporters: java.util.List[MetricsReporter] = config.metricReporterClasses + reporters.add(new JmxReporter(jmxPrefix)) + + // This exists so SocketServer (which uses Client libraries) can use the client Time objects without having to convert all of Kafka to use them + // Once we get rid of kafka.utils.time, we can get rid of this too + private val socketServerTime: org.apache.kafka.common.utils.Time = new org.apache.kafka.common.utils.SystemTime() + + private val metricConfig: MetricConfig = new MetricConfig() + .samples(config.metricNumSamples) + .timeWindow(config.metricSampleWindowMs, TimeUnit.MILLISECONDS) + val brokerState: BrokerState = new BrokerState var apis: KafkaApis = null @@ -120,8 +132,9 @@ class KafkaServer(val config: KafkaConfig, time: Time = SystemTime) extends Logg config.brokerId = getBrokerId this.logIdent = "[Kafka Server " + config.brokerId + "], " + val metrics = new Metrics(metricConfig, reporters, socketServerTime) - socketServer = new SocketServer(config) + socketServer = new SocketServer(config, metrics, socketServerTime) socketServer.startup() /* start replica manager */ diff --git a/core/src/test/scala/unit/kafka/network/SocketServerTest.scala b/core/src/test/scala/unit/kafka/network/SocketServerTest.scala index 6fd4a4f..f7bf529 100644 --- a/core/src/test/scala/unit/kafka/network/SocketServerTest.scala +++ b/core/src/test/scala/unit/kafka/network/SocketServerTest.scala @@ -50,7 +50,7 @@ class SocketServerTest extends JUnitSuite { props.put("max.connections.per.ip", "5") props.put("connections.max.idle.ms", "60000") val config: KafkaConfig = KafkaConfig.fromProps(props) - val server: SocketServer = new SocketServer(config) + val server: SocketServer = new SocketServer(config, new Metrics(), new SystemTime()) server.startup() def sendRequest(socket: Socket, id: Short, request: Array[Byte]) { @@ -179,7 +179,7 @@ class SocketServerTest extends JUnitSuite { val overrideNum = 6 val overrides: Map[String, Int] = Map("localhost" -> overrideNum) val overrideprops = TestUtils.createBrokerConfig(0, TestUtils.MockZkConnect, port = 0) - val overrideServer: SocketServer = new SocketServer(KafkaConfig.fromProps(overrideprops)) + val overrideServer: SocketServer = new SocketServer(KafkaConfig.fromProps(overrideprops), new Metrics(), new SystemTime()) overrideServer.startup() // make the maximum allowable number of connections and then leak them val conns = ((0 until overrideNum).map(i => connect(overrideServer))) @@ -196,16 +196,30 @@ class SocketServerTest extends JUnitSuite { 
val overrideprops = TestUtils.createBrokerConfig(0, TestUtils.MockZkConnect, port = 0, enableSSL = true, trustStoreFile = Some(trustStoreFile)) overrideprops.put("listeners", "SSL://localhost:0") - val overrideServer: SocketServer = new SocketServer(KafkaConfig.fromProps(overrideprops)) + val overrideServer: SocketServer = new SocketServer(KafkaConfig.fromProps(overrideprops), new Metrics(), new SystemTime()) overrideServer.startup() val sslContext = SSLContext.getInstance("TLSv1.2") sslContext.init(null, Array(TestUtils.trustAllCerts), new java.security.SecureRandom()) val socketFactory = sslContext.getSocketFactory - val socket = socketFactory.createSocket("localhost", overrideServer.boundPort(SecurityProtocol.SSL)).asInstanceOf[SSLSocket] - socket.setNeedClientAuth(false) - val bytes = new Array[Byte](40) - sendRequest(socket, 0, bytes) + val sslSocket = socketFactory.createSocket("localhost", overrideServer.boundPort(SecurityProtocol.SSL)).asInstanceOf[SSLSocket] + sslSocket.setNeedClientAuth(false) + + val correlationId = -1 + val clientId = SyncProducerConfig.DefaultClientId + val ackTimeoutMs = SyncProducerConfig.DefaultAckTimeoutMs + val ack = SyncProducerConfig.DefaultRequiredAcks + val emptyRequest = + new ProducerRequest(correlationId, clientId, ack, ackTimeoutMs, collection.mutable.Map[TopicAndPartition, ByteBufferMessageSet]()) + + val byteBuffer = ByteBuffer.allocate(emptyRequest.sizeInBytes) + emptyRequest.writeTo(byteBuffer) + byteBuffer.rewind() + val serializedBytes = new Array[Byte](byteBuffer.remaining) + byteBuffer.get(serializedBytes) + + sendRequest(sslSocket, 0, serializedBytes) processRequest(overrideServer.requestChannel) + assertEquals(serializedBytes.toSeq, receiveResponse(sslSocket).toSeq) overrideServer.shutdown() } } diff --git a/core/src/test/scala/unit/kafka/server/KafkaConfigConfigDefTest.scala b/core/src/test/scala/unit/kafka/server/KafkaConfigConfigDefTest.scala index 3ef7ed7..eaac4ce 100644 --- a/core/src/test/scala/unit/kafka/server/KafkaConfigConfigDefTest.scala +++ b/core/src/test/scala/unit/kafka/server/KafkaConfigConfigDefTest.scala @@ -247,8 +247,7 @@ class KafkaConfigConfigDefTest extends JUnit3Suite { case KafkaConfig.SSLTruststoreLocationProp => expected.setProperty(name, "/tmp/truststore.jks") case KafkaConfig.SSLKeyManagerAlgorithmProp => expected.setProperty(name, "ssl") case KafkaConfig.SSLTrustManagerAlgorithmProp => expected.setProperty(name, "tls") - case KafkaConfig.SSLNeedClientAuthProp => expected.setProperty(name, randFrom("true", "false")) - case KafkaConfig.SSLWantClientAuthProp => expected.setProperty(name, randFrom("true", "false")) + case KafkaConfig.SSLClientAuthProp => expected.setProperty(name, randFrom("none", "requested", "required")) case nonNegativeIntProperty => expected.setProperty(name, nextInt(Int.MaxValue).toString) } }) @@ -370,8 +369,7 @@ class KafkaConfigConfigDefTest extends JUnit3Suite { case KafkaConfig.SSLTruststoreLocationProp => // ignore string case KafkaConfig.SSLKeyManagerAlgorithmProp => case KafkaConfig.SSLTrustManagerAlgorithmProp => - case KafkaConfig.SSLNeedClientAuthProp => assertPropertyInvalid(getBaseProperties(), name, "not_a_boolean", "0") - case KafkaConfig.SSLWantClientAuthProp => assertPropertyInvalid(getBaseProperties(), name, "not_a_boolean", "0") + case KafkaConfig.SSLClientAuthProp => // ignore string case nonNegativeIntProperty => assertPropertyInvalid(getBaseProperties(), name, "not_a_number", "-1") } -- 2.4.6 From 4338f464896314de1989e196a3be0eac255e7b6f Mon Sep 17 00:00:00 2001 From: 
Sriharsha Chintalapani Date: Sun, 19 Jul 2015 17:24:46 -0700 Subject: [PATCH 23/30] KAFKA-1690. Addressing reviews. --- .../kafka/common/network/ByteBufferSend.java | 4 +- .../apache/kafka/common/network/KafkaChannel.java | 4 +- .../kafka/common/network/NetworkReceive.java | 4 +- .../common/network/PlainTextTransportLayer.java | 23 +++++- .../kafka/common/network/SSLChannelBuilder.java | 2 +- .../kafka/common/network/SSLTransportLayer.java | 82 ++++++++++++++++------ .../kafka/common/network/TransportLayer.java | 10 ++- 7 files changed, 98 insertions(+), 31 deletions(-) diff --git a/clients/src/main/java/org/apache/kafka/common/network/ByteBufferSend.java b/clients/src/main/java/org/apache/kafka/common/network/ByteBufferSend.java index 86fc6f7..f409874 100644 --- a/clients/src/main/java/org/apache/kafka/common/network/ByteBufferSend.java +++ b/clients/src/main/java/org/apache/kafka/common/network/ByteBufferSend.java @@ -61,8 +61,8 @@ public class ByteBufferSend implements Send { // This is temporary workaround. As Send , Receive interfaces are being used by BlockingChannel. // Once BlockingChannel is removed we can make Send, Receive to work with transportLayer rather than // GatheringByteChannel or ScatteringByteChannel. - if (channel instanceof SSLTransportLayer) { - pending = ((SSLTransportLayer) channel).pending(); + if (channel instanceof TransportLayer) { + pending = ((TransportLayer) channel).hasPendingWrites(); } return written; } diff --git a/clients/src/main/java/org/apache/kafka/common/network/KafkaChannel.java b/clients/src/main/java/org/apache/kafka/common/network/KafkaChannel.java index 72d5a75..68da18c 100644 --- a/clients/src/main/java/org/apache/kafka/common/network/KafkaChannel.java +++ b/clients/src/main/java/org/apache/kafka/common/network/KafkaChannel.java @@ -149,9 +149,9 @@ public class KafkaChannel { private boolean send(Send send) throws IOException { send.writeTo(transportLayer); - if (send.completed()) { + if (send.completed()) transportLayer.removeInterestOps(SelectionKey.OP_WRITE); - } + return send.completed(); } diff --git a/clients/src/main/java/org/apache/kafka/common/network/NetworkReceive.java b/clients/src/main/java/org/apache/kafka/common/network/NetworkReceive.java index 6444453..2a1568e 100644 --- a/clients/src/main/java/org/apache/kafka/common/network/NetworkReceive.java +++ b/clients/src/main/java/org/apache/kafka/common/network/NetworkReceive.java @@ -87,9 +87,9 @@ public class NetworkReceive implements Receive { int receiveSize = size.getInt(); if (receiveSize < 0) throw new InvalidReceiveException("Invalid receive (size = " + receiveSize + ")"); - if (maxSize != UNLIMITED && receiveSize > maxSize) { + if (maxSize != UNLIMITED && receiveSize > maxSize) throw new InvalidReceiveException("Invalid receive (size = " + receiveSize + " larger than " + maxSize + ")"); - } + this.buffer = ByteBuffer.allocate(receiveSize); } } diff --git a/clients/src/main/java/org/apache/kafka/common/network/PlainTextTransportLayer.java b/clients/src/main/java/org/apache/kafka/common/network/PlainTextTransportLayer.java index d6e9009..cff87f0 100644 --- a/clients/src/main/java/org/apache/kafka/common/network/PlainTextTransportLayer.java +++ b/clients/src/main/java/org/apache/kafka/common/network/PlainTextTransportLayer.java @@ -48,6 +48,7 @@ public class PlainTextTransportLayer implements TransportLayer { return true; } + @Override public void finishConnect() throws IOException { socketChannel.finishConnect(); int ops = key.interestOps(); @@ -56,19 +57,22 @@ public class 
PlainTextTransportLayer implements TransportLayer { key.interestOps(ops); } + @Override public void disconnect() { key.cancel(); } + @Override public SocketChannel socketChannel() { return socketChannel; } - + @Override public boolean isOpen() { return socketChannel.isOpen(); } + @Override public boolean isConnected() { return socketChannel.isConnected(); } @@ -78,6 +82,7 @@ public class PlainTextTransportLayer implements TransportLayer { * * @throws IOException If and I/O error occurs */ + @Override public void close() throws IOException { socketChannel.socket().close(); socketChannel.close(); @@ -97,7 +102,7 @@ public class PlainTextTransportLayer implements TransportLayer { * @return The number of bytes read, possible zero or -1 if the channel has reached end-of-stream * @throws IOException if some other I/O error occurs */ - + @Override public int read(ByteBuffer dst) throws IOException { return socketChannel.read(dst); } @@ -109,6 +114,7 @@ public class PlainTextTransportLayer implements TransportLayer { * @return The number of bytes read, possibly zero, or -1 if the channel has reached end-of-stream. * @throws IOException if some other I/O error occurs */ + @Override public long read(ByteBuffer[] dsts) throws IOException { return socketChannel.read(dsts); } @@ -121,6 +127,7 @@ public class PlainTextTransportLayer implements TransportLayer { * @returns The number of bytes read, possibly zero, or -1 if the channel has reached end-of-stream. * @throws IOException if some other I/O error occurs */ + @Override public long read(ByteBuffer[] dsts, int offset, int length) throws IOException { return socketChannel.read(dsts, offset, length); } @@ -132,6 +139,7 @@ public class PlainTextTransportLayer implements TransportLayer { * @returns The number of bytes read, possibly zero, or -1 if the channel has reached end-of-stream * @throws IOException If some other I/O error occurs */ + @Override public int write(ByteBuffer src) throws IOException { return socketChannel.write(src); } @@ -143,6 +151,7 @@ public class PlainTextTransportLayer implements TransportLayer { * @returns The number of bytes read, possibly zero, or -1 if the channel has reached end-of-stream * @throws IOException If some other I/O error occurs */ + @Override public long write(ByteBuffer[] srcs) throws IOException { return socketChannel.write(srcs); } @@ -156,10 +165,20 @@ public class PlainTextTransportLayer implements TransportLayer { * @return returns no.of bytes written , possibly zero. * @throws IOException If some other I/O error occurs */ + @Override public long write(ByteBuffer[] srcs, int offset, int length) throws IOException { return socketChannel.write(srcs, offset, length); } + + /** + * always returns false as there will be not be any + * pending writes + */ + public boolean hasPendingWrites() { + return false; + } + /** * Rerturns ANONYMOUS as Principal. 
*/ diff --git a/clients/src/main/java/org/apache/kafka/common/network/SSLChannelBuilder.java b/clients/src/main/java/org/apache/kafka/common/network/SSLChannelBuilder.java index 17c33c3..2e0525c 100644 --- a/clients/src/main/java/org/apache/kafka/common/network/SSLChannelBuilder.java +++ b/clients/src/main/java/org/apache/kafka/common/network/SSLChannelBuilder.java @@ -48,7 +48,7 @@ public class SSLChannelBuilder implements ChannelBuilder { KafkaChannel channel = null; try { SocketChannel socketChannel = (SocketChannel) key.channel(); - SSLTransportLayer transportLayer = new SSLTransportLayer(key, + SSLTransportLayer transportLayer = new SSLTransportLayer(id, key, sslFactory.createSSLEngine(socketChannel.socket().getInetAddress().getHostName(), socketChannel.socket().getPort())); Authenticator authenticator = new DefaultAuthenticator(); diff --git a/clients/src/main/java/org/apache/kafka/common/network/SSLTransportLayer.java b/clients/src/main/java/org/apache/kafka/common/network/SSLTransportLayer.java index fe60603..80f8b4a 100644 --- a/clients/src/main/java/org/apache/kafka/common/network/SSLTransportLayer.java +++ b/clients/src/main/java/org/apache/kafka/common/network/SSLTransportLayer.java @@ -43,21 +43,23 @@ import org.slf4j.LoggerFactory; public class SSLTransportLayer implements TransportLayer { private static final Logger log = LoggerFactory.getLogger(SSLTransportLayer.class); + private String channelId; protected SSLEngine sslEngine; private SelectionKey key; private SocketChannel socketChannel; private HandshakeStatus handshakeStatus; private SSLEngineResult handshakeResult; private boolean handshakeComplete = false; - private boolean closed = false; private boolean closing = false; private ByteBuffer netReadBuffer; private ByteBuffer netWriteBuffer; private ByteBuffer appReadBuffer; private ByteBuffer emptyBuf = ByteBuffer.allocate(0); + // interestOps used to cache any pending interestOps during the handshake. private int interestOps; - public SSLTransportLayer(SelectionKey key, SSLEngine sslEngine) throws IOException { + public SSLTransportLayer(String channelId, SelectionKey key, SSLEngine sslEngine) throws IOException { + this.channelId = channelId; this.key = key; this.socketChannel = (SocketChannel) key.channel(); this.sslEngine = sslEngine; @@ -72,13 +74,14 @@ public class SSLTransportLayer implements TransportLayer { * sslEngine handshakeStatus. */ private void startHandshake() throws IOException { + //clear & set netRead & netWrite buffers netWriteBuffer.position(0); netWriteBuffer.limit(0); netReadBuffer.position(0); netReadBuffer.limit(0); handshakeComplete = false; - closed = false; closing = false; + //caching OP_READ to set it after handshake is done addInterestOps(SelectionKey.OP_READ); //initiate handshake sslEngine.beginHandshake(); @@ -95,8 +98,10 @@ public class SSLTransportLayer implements TransportLayer { /** * does socketChannel.finishConnect() */ + @Override public void finishConnect() throws IOException { socketChannel.finishConnect(); + // caching interestOps to set after the handshake is finished. removeInterestOps(SelectionKey.OP_CONNECT); key.interestOps(key.interestOps() & ~SelectionKey.OP_CONNECT | SelectionKey.OP_READ); } @@ -104,18 +109,22 @@ public class SSLTransportLayer implements TransportLayer { /** * disconnects selectionKey. 
*/ + @Override public void disconnect() { key.cancel(); } + @Override public SocketChannel socketChannel() { return socketChannel; } + @Override public boolean isOpen() { return socketChannel.isOpen(); } + @Override public boolean isConnected() { return socketChannel.isConnected(); } @@ -126,6 +135,7 @@ public class SSLTransportLayer implements TransportLayer { * @throws IOException if an I/O error occurs * @throws IOException if there is data on the outgoing network buffer and we are unable to flush it */ + @Override public void close() throws IOException { if (closing) return; closing = true; @@ -154,7 +164,7 @@ public class SSLTransportLayer implements TransportLayer { /** * returns true if there are any pending contents in netWriteBuffer */ - public boolean pending() { + public boolean hasPendingWrites() { return netWriteBuffer.hasRemaining(); } @@ -175,6 +185,24 @@ public class SSLTransportLayer implements TransportLayer { /** * Performs SSL handshake, non blocking. + * Before application data (kafka protocols) can be sent client & kafka broker must + * perform ssl handshake. + * During the handshake wrap, unwrap generate and consumer handshake data and will be transported over socketChannel. + * Each SSLEngine operation generates SSLEngineResult , of which SSLEngineResult.handshakeStatus field is used to + * determine what operation needs to occur to move handshake along. + * A typical handshake might look like this. + * +-------------+----------------------------------+-------------+ + * | client | SSL/TLS message | HSStatus | + * +-------------+----------------------------------+-------------+ + * | wrap() | ClientHello | NEED_UNWRAP | + * | unwrap() | ServerHello/Cert/ServerHelloDone | NEED_WRAP | + * | wrap() | ClientKeyExchange | NEED_WRAP | + * | wrap() | ChangeCipherSpec | NEED_WRAP | + * | wrap() | Finished | NEED_UNWRAP | + * | unwrap() | ChangeCipherSpec | NEED_UNWRAP | + * | unwrap() | Finished | FINISHED | + * +-------------+----------------------------------+-------------+ + * * @throws IOException */ public void handshake() throws IOException { @@ -189,9 +217,11 @@ public class SSLTransportLayer implements TransportLayer { try { switch (handshakeStatus) { case NEED_TASK: + log.trace("SSLHandshake NEED_TASK", channelId); handshakeStatus = runDelegatedTasks(); break; case NEED_WRAP: + log.trace("SSLHandshake NEED_WRAP", channelId); handshakeResult = handshakeWrap(write); if (handshakeResult.getStatus() == Status.BUFFER_OVERFLOW) { int currentPacketBufferSize = packetBufferSize(); @@ -205,13 +235,14 @@ public class SSLTransportLayer implements TransportLayer { } else if (handshakeResult.getStatus() == Status.CLOSED) { throw new EOFException(); } - //fall down to NEED_UNWRAP on the same call, will result in a - //BUFFER_UNDERFLOW if it needs data + //if handshake status is not NEED_UNWRAP or unable to flush netWriteBuffer contents + //we will break here otherwise we can do need_unwrap in the same call. 
if (handshakeStatus != HandshakeStatus.NEED_UNWRAP || (!write && !flush(netWriteBuffer))) { key.interestOps(key.interestOps() | SelectionKey.OP_WRITE); break; } case NEED_UNWRAP: + log.trace("SSLHandshake NEED_UNWRAP", channelId); handshakeResult = handshakeUnwrap(read); if (handshakeResult.getStatus() == Status.BUFFER_UNDERFLOW) { int currentPacketBufferSize = packetBufferSize(); @@ -221,9 +252,9 @@ public class SSLTransportLayer implements TransportLayer { } } else if (handshakeResult.getStatus() == Status.BUFFER_OVERFLOW) { int currentAppBufferSize = applicationBufferSize(); - netReadBuffer = Utils.ensureCapacity(netReadBuffer, currentAppBufferSize); - if (netReadBuffer.position() > currentAppBufferSize) { - throw new IllegalStateException("Buffer underflow when available data size (" + netReadBuffer.position() + + appReadBuffer = Utils.ensureCapacity(appReadBuffer, currentAppBufferSize); + if (appReadBuffer.position() > currentAppBufferSize) { + throw new IllegalStateException("Buffer underflow when available data size (" + appReadBuffer.position() + ") > packet buffer size (" + currentAppBufferSize + ")"); } } else if (handshakeResult.getStatus() == Status.CLOSED) { @@ -280,6 +311,7 @@ public class SSLTransportLayer implements TransportLayer { // It can move from FINISHED status to NOT_HANDSHAKING after the handshake is completed. // Hence we also need to check handshakeResult.getHandshakeStatus() if the handshake finished or not if (handshakeResult.getHandshakeStatus() == HandshakeStatus.FINISHED) { + log.trace("SSLHandshake FINISHED", channelId); //we are complete if we have delivered the last package handshakeComplete = !netWriteBuffer.hasRemaining(); //set interestOps if we are complete, otherwise we still have data to write @@ -299,6 +331,7 @@ public class SSLTransportLayer implements TransportLayer { * @throws IOException */ private SSLEngineResult handshakeWrap(Boolean doWrite) throws IOException { + log.trace("SSLHandshake handshakeWrap", channelId); if (netWriteBuffer.hasRemaining()) throw new IllegalStateException("handshakeWrap called with netWriteBuffer not empty"); //this should never be called with a network buffer that contains data @@ -324,6 +357,7 @@ public class SSLTransportLayer implements TransportLayer { * @throws IOException */ private SSLEngineResult handshakeUnwrap(Boolean doRead) throws IOException { + log.trace("SSLHandshake handshakeUnwrap", channelId); if (netReadBuffer.position() == netReadBuffer.limit()) { //clear the buffer if we have emptied it out on data netReadBuffer.clear(); @@ -331,7 +365,7 @@ public class SSLTransportLayer implements TransportLayer { if (doRead) { int read = socketChannel.read(netReadBuffer); - if (read == -1) throw new IOException("EOF during handshake."); + if (read == -1) throw new EOFException("EOF during handshake."); } SSLEngineResult result; boolean cont = false; @@ -347,6 +381,7 @@ public class SSLTransportLayer implements TransportLayer { } cont = result.getStatus() == SSLEngineResult.Status.OK && handshakeStatus == HandshakeStatus.NEED_UNWRAP; + log.trace("SSLHandshake handshakeUnwrap: handshakeStatus ", handshakeStatus); } while (cont); return result; } @@ -361,8 +396,9 @@ public class SSLTransportLayer implements TransportLayer { * @return The number of bytes read, possible zero or -1 if the channel has reached end-of-stream * @throws IOException if some other I/O error occurs */ + @Override public int read(ByteBuffer dst) throws IOException { - if (closing || closed) return -1; + if (closing) return -1; int read = 0; 
if (!handshakeComplete) return read; @@ -424,6 +460,7 @@ public class SSLTransportLayer implements TransportLayer { * @return The number of bytes read, possibly zero, or -1 if the channel has reached end-of-stream. * @throws IOException if some other I/O error occurs */ + @Override public long read(ByteBuffer[] dsts) throws IOException { return read(dsts, 0, dsts.length); } @@ -437,6 +474,7 @@ public class SSLTransportLayer implements TransportLayer { * @returns The number of bytes read, possibly zero, or -1 if the channel has reached end-of-stream. * @throws IOException if some other I/O error occurs */ + @Override public long read(ByteBuffer[] dsts, int offset, int length) throws IOException { if ((offset < 0) || (length < 0) || (offset > dsts.length - length)) throw new IndexOutOfBoundsException(); @@ -465,35 +503,36 @@ public class SSLTransportLayer implements TransportLayer { * @returns The number of bytes read, possibly zero, or -1 if the channel has reached end-of-stream * @throws IOException If some other I/O error occurs */ + @Override public int write(ByteBuffer src) throws IOException { int written = 0; - if (closing || closed) throw new IOException("Channel is in closing state"); + if (closing) throw new IllegalStateException("Channel is in closing state"); if (!handshakeComplete) return written; if (!flush(netWriteBuffer)) return written; netWriteBuffer.clear(); - SSLEngineResult wrap = sslEngine.wrap(src, netWriteBuffer); + SSLEngineResult wrapResult = sslEngine.wrap(src, netWriteBuffer); netWriteBuffer.flip(); //handle ssl renegotiation - if (wrap.getHandshakeStatus() != HandshakeStatus.NOT_HANDSHAKING) { + if (wrapResult.getHandshakeStatus() != HandshakeStatus.NOT_HANDSHAKING) { handshake(); return written; } - if (wrap.getStatus() == Status.OK) { - written = wrap.bytesConsumed(); + if (wrapResult.getStatus() == Status.OK) { + written = wrapResult.bytesConsumed(); flush(netWriteBuffer); - } else if (wrap.getStatus() == Status.BUFFER_OVERFLOW) { + } else if (wrapResult.getStatus() == Status.BUFFER_OVERFLOW) { int currentPacketBufferSize = packetBufferSize(); netWriteBuffer = Utils.ensureCapacity(netReadBuffer, packetBufferSize()); if (netWriteBuffer.position() >= currentPacketBufferSize) throw new IllegalStateException("SSL BUFFER_OVERFLOW when available data size (" + netWriteBuffer.position() + ") >= network buffer size (" + currentPacketBufferSize + ")"); - } else if (wrap.getStatus() == Status.BUFFER_UNDERFLOW) { + } else if (wrapResult.getStatus() == Status.BUFFER_UNDERFLOW) { throw new IllegalStateException("SSL BUFFER_UNDERFLOW during write"); - } else if (wrap.getStatus() == Status.CLOSED) { + } else if (wrapResult.getStatus() == Status.CLOSED) { throw new EOFException(); } return written; @@ -508,6 +547,7 @@ public class SSLTransportLayer implements TransportLayer { * @return returns no.of bytes written , possibly zero. 
* @throws IOException If some other I/O error occurs */ + @Override public long write(ByteBuffer[] srcs, int offset, int length) throws IOException { if ((offset < 0) || (length < 0) || (offset > srcs.length - length)) throw new IndexOutOfBoundsException(); @@ -515,7 +555,7 @@ public class SSLTransportLayer implements TransportLayer { int totalWritten = 0; int i = offset; while (i < length) { - if (srcs[i].hasRemaining() || pending()) { + if (srcs[i].hasRemaining() || hasPendingWrites()) { int written = write(srcs[i]); if (written > 0) { totalWritten += written; @@ -539,7 +579,7 @@ public class SSLTransportLayer implements TransportLayer { * @return returns no.of bytes consumed by SSLEngine.wrap , possibly zero. * @throws IOException If some other I/O error occurs */ - + @Override public long write(ByteBuffer[] srcs) throws IOException { return write(srcs, 0, srcs.length); } diff --git a/clients/src/main/java/org/apache/kafka/common/network/TransportLayer.java b/clients/src/main/java/org/apache/kafka/common/network/TransportLayer.java index 411423c..8ff8668 100644 --- a/clients/src/main/java/org/apache/kafka/common/network/TransportLayer.java +++ b/clients/src/main/java/org/apache/kafka/common/network/TransportLayer.java @@ -18,7 +18,11 @@ package org.apache.kafka.common.network; /* - * Transport layer for underlying communication + * Transport layer for underlying communication. + * At very basic level it is wrapper around SocketChannel and can be used as substitue for SocketChannel + * and other network Channel implementations. + * As NetworkClient replaces BlockingChannel and other implementations we will be using KafkaChannel as + * a network I/O channel. */ import java.io.IOException; import java.nio.channels.SocketChannel; @@ -63,6 +67,10 @@ public interface TransportLayer extends ScatteringByteChannel, GatheringByteChan */ void handshake() throws IOException; + /** + * Returns true if there are any pending writes + */ + boolean hasPendingWrites(); /** * returns SSLSession.getPeerPrinicpal if SSLTransportLayer used -- 2.4.6 From 2d93f2448a4a2b4285dd1803b9f17c96ce1a796c Mon Sep 17 00:00:00 2001 From: Sriharsha Chintalapani Date: Mon, 20 Jul 2015 09:44:03 -0700 Subject: [PATCH 24/30] KAFKA-1690. Addressing reviews. Removed interestOps from SSLTransportLayer. 
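With the cached interestOps field removed, SSLTransportLayer applies interest-op changes directly to the SelectionKey once the handshake has completed, and NetworkClient only considers a node sendable after Selector.isChannelReady(node) reports the handshake as finished. A condensed sketch of the resulting interest-op handling, simplified from the diff below (while the handshake is still in flight the ops are simply not applied, and a cancelled key raises CancelledKeyException):

    // simplified from SSLTransportLayer in this patch
    public void addInterestOps(int ops) {
        if (handshakeComplete)
            key.interestOps(key.interestOps() | ops);
        else if (!key.isValid())
            throw new CancelledKeyException();
    }

    public void removeInterestOps(int ops) {
        if (handshakeComplete)
            key.interestOps(key.interestOps() & ~ops);
        else if (!key.isValid())
            throw new CancelledKeyException();
    }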
--- .../org/apache/kafka/clients/NetworkClient.java | 4 +- .../kafka/common/network/SSLTransportLayer.java | 25 ++-------- .../apache/kafka/common/network/Selectable.java | 11 +++-- .../org/apache/kafka/common/network/Selector.java | 10 +++- .../kafka/common/network/SSLSelectorTest.java | 56 ++++------------------ .../java/org/apache/kafka/test/MockSelector.java | 8 +++- 6 files changed, 39 insertions(+), 75 deletions(-) diff --git a/clients/src/main/java/org/apache/kafka/clients/NetworkClient.java b/clients/src/main/java/org/apache/kafka/clients/NetworkClient.java index d90daa4..0a472d4 100644 --- a/clients/src/main/java/org/apache/kafka/clients/NetworkClient.java +++ b/clients/src/main/java/org/apache/kafka/clients/NetworkClient.java @@ -170,7 +170,7 @@ public class NetworkClient implements KafkaClient { * @param node The node */ private boolean isSendable(String node) { - return connectionStates.isConnected(node) && inFlightRequests.canSendMore(node); + return connectionStates.isConnected(node) && selector.isChannelReady(node) && inFlightRequests.canSendMore(node); } /** @@ -473,7 +473,7 @@ public class NetworkClient implements KafkaClient { String nodeConnectionId = node.idString(); - if (connectionStates.isConnected(nodeConnectionId) && inFlightRequests.canSendMore(nodeConnectionId)) { + if (isSendable(nodeConnectionId)) { Set topics = metadata.topics(); this.metadataFetchInProgress = true; ClientRequest metadataRequest = metadataRequest(now, nodeConnectionId, topics); diff --git a/clients/src/main/java/org/apache/kafka/common/network/SSLTransportLayer.java b/clients/src/main/java/org/apache/kafka/common/network/SSLTransportLayer.java index 80f8b4a..4ac215c 100644 --- a/clients/src/main/java/org/apache/kafka/common/network/SSLTransportLayer.java +++ b/clients/src/main/java/org/apache/kafka/common/network/SSLTransportLayer.java @@ -55,8 +55,6 @@ public class SSLTransportLayer implements TransportLayer { private ByteBuffer netWriteBuffer; private ByteBuffer appReadBuffer; private ByteBuffer emptyBuf = ByteBuffer.allocate(0); - // interestOps used to cache any pending interestOps during the handshake. - private int interestOps; public SSLTransportLayer(String channelId, SelectionKey key, SSLEngine sslEngine) throws IOException { this.channelId = channelId; @@ -70,8 +68,7 @@ public class SSLTransportLayer implements TransportLayer { } /** - * starts sslEngine handshake process and sets the selectionKey interestOps based - * sslEngine handshakeStatus. + * starts sslEngine handshake process */ private void startHandshake() throws IOException { //clear & set netRead & netWrite buffers @@ -81,13 +78,9 @@ public class SSLTransportLayer implements TransportLayer { netReadBuffer.limit(0); handshakeComplete = false; closing = false; - //caching OP_READ to set it after handshake is done - addInterestOps(SelectionKey.OP_READ); //initiate handshake sslEngine.beginHandshake(); handshakeStatus = sslEngine.getHandshakeStatus(); - if (handshakeStatus == HandshakeStatus.NEED_WRAP) - key.interestOps(key.interestOps() | SelectionKey.OP_WRITE); } @@ -101,8 +94,6 @@ public class SSLTransportLayer implements TransportLayer { @Override public void finishConnect() throws IOException { socketChannel.finishConnect(); - // caching interestOps to set after the handshake is finished. 
- removeInterestOps(SelectionKey.OP_CONNECT); key.interestOps(key.interestOps() & ~SelectionKey.OP_CONNECT | SelectionKey.OP_READ); } @@ -315,9 +306,7 @@ public class SSLTransportLayer implements TransportLayer { //we are complete if we have delivered the last package handshakeComplete = !netWriteBuffer.hasRemaining(); //set interestOps if we are complete, otherwise we still have data to write - if (handshakeComplete) - key.interestOps(interestOps); - else + if (!handshakeComplete) key.interestOps(key.interestOps() | SelectionKey.OP_WRITE); } else { throw new IOException("NOT_HANDSHAKING during handshake"); @@ -610,11 +599,8 @@ public class SSLTransportLayer implements TransportLayer { * @param ops SelectionKey interestOps */ public void addInterestOps(int ops) { - interestOps |= ops; - // if handshake is not complete and key is cancelled. - // we should check for key.isValid. if (handshakeComplete) - key.interestOps(interestOps); + key.interestOps(key.interestOps() | ops); else if (!key.isValid()) throw new CancelledKeyException(); } @@ -624,11 +610,8 @@ public class SSLTransportLayer implements TransportLayer { * @param ops SelectionKey interestOps */ public void removeInterestOps(int ops) { - interestOps &= ~ops; - // if handshake is not complete and key is cancelled. - // we should check for key.isValid. if (handshakeComplete) - key.interestOps(interestOps); + key.interestOps(key.interestOps() & ~ops); else if (!key.isValid()) throw new CancelledKeyException(); } diff --git a/clients/src/main/java/org/apache/kafka/common/network/Selectable.java b/clients/src/main/java/org/apache/kafka/common/network/Selectable.java index 0f84ea7..39eae4a 100644 --- a/clients/src/main/java/org/apache/kafka/common/network/Selectable.java +++ b/clients/src/main/java/org/apache/kafka/common/network/Selectable.java @@ -3,9 +3,9 @@ * file distributed with this work for additional information regarding copyright ownership. The ASF licenses this file * to You under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the * License. You may obtain a copy of the License at - * + * * http://www.apache.org/licenses/LICENSE-2.0 - * + * * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the * specific language governing permissions and limitations under the License. 
@@ -104,4 +104,9 @@ public interface Selectable { */ public void unmuteAll(); -} \ No newline at end of file + /** + * returns true if a channel is ready + * @param id The id for the connection + */ + public boolean isChannelReady(String id); +} diff --git a/clients/src/main/java/org/apache/kafka/common/network/Selector.java b/clients/src/main/java/org/apache/kafka/common/network/Selector.java index a4391be..95f48f7 100644 --- a/clients/src/main/java/org/apache/kafka/common/network/Selector.java +++ b/clients/src/main/java/org/apache/kafka/common/network/Selector.java @@ -423,7 +423,7 @@ public class Selector implements Selectable { } /** - * Begin clsoing this connection + * Begin closing this connection * @param id channel id */ public void close(String id) { @@ -446,6 +446,14 @@ public class Selector implements Selectable { } /** + * check if channel is ready + */ + public boolean isChannelReady(String id) { + KafkaChannel channel = this.channels.get(id); + return channel.ready(); + } + + /** * Get the channel associated with this numeric id */ private KafkaChannel channelForId(String id) { diff --git a/clients/src/test/java/org/apache/kafka/common/network/SSLSelectorTest.java b/clients/src/test/java/org/apache/kafka/common/network/SSLSelectorTest.java index 478afbb..051d36a 100644 --- a/clients/src/test/java/org/apache/kafka/common/network/SSLSelectorTest.java +++ b/clients/src/test/java/org/apache/kafka/common/network/SSLSelectorTest.java @@ -149,7 +149,8 @@ public class SSLSelectorTest { public void testMute() throws Exception { blockingConnect("0"); blockingConnect("1"); - + while (!selector.isChannelReady("0") && !selector.isChannelReady("1")) + selector.poll(5); selector.send(createSend("0", "hello")); selector.send(createSend("1", "hi")); selector.mute("1"); @@ -166,52 +167,12 @@ public class SSLSelectorTest { assertEquals("The response should be from the previously muted node", "1", selector.completedReceives().get(0).source()); } - /** - * Tests that SSL renegotiation initiated by the server are handled correctly by the client - * @throws Exception - */ - // @Test - // public void testRenegotiation() throws Exception { - // int reqs = 500; - // String node = "0"; - // // create connections - // InetSocketAddress addr = new InetSocketAddress("localhost", server.port); - // selector.connect(node, addr, BUFFER_SIZE, BUFFER_SIZE); - - // // send echo requests and receive responses - // int requests = 0; - // int responses = 0; - // int renegotiates = 0; - // selector.send(createSend(node, node + "-" + 0)); - // requests++; - - // // loop until we complete all requests - // while (responses < reqs) { - // selector.poll(0L); - // if (responses >= 100 && renegotiates == 0) { - // renegotiates++; - // server.renegotiate(); - // } - // assertEquals("No disconnects should have occurred.", 0, selector.disconnected().size()); - - // // handle any responses we may have gotten - // for (NetworkReceive receive : selector.completedReceives()) { - // String[] pieces = asString(receive).split("-"); - // assertEquals("Should be in the form 'conn-counter'", 2, pieces.length); - // assertEquals("Check the source", receive.source(), pieces[0]); - // assertEquals("Check that the receive has kindly been rewound", 0, receive.payload().position()); - // assertEquals("Check the request counter", responses, Integer.parseInt(pieces[1])); - // responses++; - // } - - // // prepare new sends for the next round - // for (int i = 0; i < selector.completedSends().size() && requests < reqs; i++, requests++) { - 
// selector.send(createSend(node, node + "-" + requests)); - // } - // } - // } private String blockingRequest(String node, String s) throws IOException { + while (!selector.isChannelReady(node)) { + selector.poll(1000L); + } + selector.send(createSend(node, s)); while (true) { selector.poll(1000L); @@ -240,6 +201,9 @@ public class SSLSelectorTest { private void sendAndReceive(String node, String requestPrefix, int startIndex, int endIndex) throws Exception { int requests = startIndex; int responses = startIndex; + while (!selector.isChannelReady(node)) { + selector.poll(1000L); + } selector.send(createSend(node, requestPrefix + "-" + startIndex)); requests++; while (responses < endIndex) { @@ -253,7 +217,7 @@ public class SSLSelectorTest { responses++; } - for (int i = 0; i < selector.completedSends().size() && requests < endIndex; i++, requests++) { + for (int i = 0; i < selector.completedSends().size() && requests < endIndex && selector.isChannelReady(node); i++, requests++) { selector.send(createSend(node, requestPrefix + "-" + requests)); } } diff --git a/clients/src/test/java/org/apache/kafka/test/MockSelector.java b/clients/src/test/java/org/apache/kafka/test/MockSelector.java index 51eb9d1..7257cad 100644 --- a/clients/src/test/java/org/apache/kafka/test/MockSelector.java +++ b/clients/src/test/java/org/apache/kafka/test/MockSelector.java @@ -3,9 +3,9 @@ * file distributed with this work for additional information regarding copyright ownership. The ASF licenses this file * to You under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the * License. You may obtain a copy of the License at - * + * * http://www.apache.org/licenses/LICENSE-2.0 - * + * * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the * specific language governing permissions and limitations under the License. @@ -120,4 +120,8 @@ public class MockSelector implements Selectable { public void unmuteAll() { } + @Override + public boolean isChannelReady(String id) { + return true; + } } -- 2.4.6 From 2fb226d58e1016ec1665b8996dbd956da952d29c Mon Sep 17 00:00:00 2001 From: Sriharsha Chintalapani Date: Mon, 20 Jul 2015 11:17:17 -0700 Subject: [PATCH 25/30] KAFKA-1690. Addressing reviews. 
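The handshake wait now lives in blockingConnect() rather than blockingRequest(), so every connection used by the tests is ready before anything is sent. For context, a small illustrative helper (not part of the patch) showing the wait pattern the tests rely on; the helper name and timeout value are made up for the sketch:

    import java.io.IOException;
    import org.apache.kafka.common.network.Selector;

    // Illustrative only: poll until the SSL handshake and authentication
    // have completed for the given node; sending on a channel that is not
    // yet ready would race the handshake.
    final class ReadyWait {
        static void awaitReady(Selector selector, String node) throws IOException {
            while (!selector.isChannelReady(node))
                selector.poll(10000L); // timeout value is illustrative
        }
    }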
--- .../java/org/apache/kafka/common/network/SSLSelectorTest.java | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/clients/src/test/java/org/apache/kafka/common/network/SSLSelectorTest.java b/clients/src/test/java/org/apache/kafka/common/network/SSLSelectorTest.java index 051d36a..e25503b 100644 --- a/clients/src/test/java/org/apache/kafka/common/network/SSLSelectorTest.java +++ b/clients/src/test/java/org/apache/kafka/common/network/SSLSelectorTest.java @@ -149,6 +149,7 @@ public class SSLSelectorTest { public void testMute() throws Exception { blockingConnect("0"); blockingConnect("1"); + // wait for handshake to finish while (!selector.isChannelReady("0") && !selector.isChannelReady("1")) selector.poll(5); selector.send(createSend("0", "hello")); @@ -169,10 +170,6 @@ public class SSLSelectorTest { private String blockingRequest(String node, String s) throws IOException { - while (!selector.isChannelReady(node)) { - selector.poll(1000L); - } - selector.send(createSend(node, s)); while (true) { selector.poll(1000L); @@ -195,12 +192,16 @@ public class SSLSelectorTest { selector.connect(node, new InetSocketAddress("localhost", server.port), BUFFER_SIZE, BUFFER_SIZE); while (!selector.connected().contains(node)) selector.poll(10000L); + //finish the handshake as well + while (!selector.isChannelReady(node)) + selector.poll(10000L); } private void sendAndReceive(String node, String requestPrefix, int startIndex, int endIndex) throws Exception { int requests = startIndex; int responses = startIndex; + // wait for handshake to finish while (!selector.isChannelReady(node)) { selector.poll(1000L); } -- 2.4.6 From ddec2cfd6b84f4a8b8161f8f537d457b48b90140 Mon Sep 17 00:00:00 2001 From: Sriharsha Chintalapani Date: Fri, 24 Jul 2015 23:07:59 -0700 Subject: [PATCH 26/30] KAFKA-1690. added staged receives to selector. 
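With SSL, a single read from the socket can decrypt into more than one complete NetworkReceive, so the selector now drains each channel into a per-channel queue of "staged" receives and promotes them to completedReceives one at a time, and only while the channel has no in-flight send and is not muted. A minimal, simplified sketch of the staging structure (generic types and class name are illustrative; the actual logic is in the Selector changes below):

    import java.util.ArrayDeque;
    import java.util.Deque;
    import java.util.HashMap;
    import java.util.Map;

    // Sketch of the staging idea: reads are queued per channel and handed
    // out one at a time so a channel that produced several receives in one
    // poll cannot flood completedReceives or deliver to a muted connection.
    final class StagedReceiveSketch<C, R> {
        private final Map<C, Deque<R>> staged = new HashMap<C, Deque<R>>();

        void stage(C channel, R receive) {
            Deque<R> deque = staged.get(channel);
            if (deque == null) {
                deque = new ArrayDeque<R>();
                staged.put(channel, deque);
            }
            deque.add(receive);
        }

        // Promote at most one staged receive for an idle channel.
        R promoteOne(C channel) {
            Deque<R> deque = staged.get(channel);
            if (deque == null)
                return null;
            R receive = deque.poll();
            if (deque.isEmpty())
                staged.remove(channel);
            return receive;
        }
    }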
--- .../apache/kafka/common/network/KafkaChannel.java | 8 ++ .../common/network/PlainTextTransportLayer.java | 14 +++- .../kafka/common/network/SSLTransportLayer.java | 42 ++++++---- .../org/apache/kafka/common/network/Selector.java | 66 ++++++++++++++-- .../kafka/common/network/TransportLayer.java | 1 + .../kafka/common/network/SSLSelectorTest.java | 48 ++++++++++++ core/src/main/scala/kafka/api/FetchResponse.scala | 24 ++++-- .../main/scala/kafka/network/SocketServer.scala | 2 +- .../integration/kafka/api/ConsumerBounceTest.scala | 1 + .../kafka/api/SSLProducerSendTest.scala | 89 +++------------------- 10 files changed, 185 insertions(+), 110 deletions(-) diff --git a/clients/src/main/java/org/apache/kafka/common/network/KafkaChannel.java b/clients/src/main/java/org/apache/kafka/common/network/KafkaChannel.java index 68da18c..d8de9f2 100644 --- a/clients/src/main/java/org/apache/kafka/common/network/KafkaChannel.java +++ b/clients/src/main/java/org/apache/kafka/common/network/KafkaChannel.java @@ -96,10 +96,18 @@ public class KafkaChannel { transportLayer.addInterestOps(SelectionKey.OP_READ); } + public boolean isMute() { + return transportLayer.isMute(); + } + public boolean ready() { return transportLayer.ready() && authenticator.complete(); } + public boolean hasSend() { + return send != null; + } + public String socketDescription() { Socket socket = transportLayer.socketChannel().socket(); if (socket == null) diff --git a/clients/src/main/java/org/apache/kafka/common/network/PlainTextTransportLayer.java b/clients/src/main/java/org/apache/kafka/common/network/PlainTextTransportLayer.java index cff87f0..cc281db 100644 --- a/clients/src/main/java/org/apache/kafka/common/network/PlainTextTransportLayer.java +++ b/clients/src/main/java/org/apache/kafka/common/network/PlainTextTransportLayer.java @@ -44,6 +44,7 @@ public class PlainTextTransportLayer implements TransportLayer { this.socketChannel = (SocketChannel) key.channel(); } + @Override public boolean ready() { return true; } @@ -86,6 +87,8 @@ public class PlainTextTransportLayer implements TransportLayer { public void close() throws IOException { socketChannel.socket().close(); socketChannel.close(); + key.attach(null); + key.cancel(); } /** @@ -147,7 +150,7 @@ public class PlainTextTransportLayer implements TransportLayer { /** * Writes a sequence of bytes to this channel from the given buffer. * - * @param src The buffer from which bytes are to be retrieved + * @param srcs The buffer from which bytes are to be retrieved * @returns The number of bytes read, possibly zero, or -1 if the channel has reached end-of-stream * @throws IOException If some other I/O error occurs */ @@ -180,7 +183,7 @@ public class PlainTextTransportLayer implements TransportLayer { } /** - * Rerturns ANONYMOUS as Principal. + * Returns ANONYMOUS as Principal. */ public Principal peerPrincipal() throws IOException { return principal; @@ -188,7 +191,7 @@ public class PlainTextTransportLayer implements TransportLayer { /** * Adds the interestOps to selectionKey. - * @param SelectionKey interestOps + * @param interestOps */ public void addInterestOps(int ops) { key.interestOps(key.interestOps() | ops); @@ -197,10 +200,13 @@ public class PlainTextTransportLayer implements TransportLayer { /** * Removes the interestOps from selectionKey. 
- * @param SelectionKey interestOps + * @param interestOps */ public void removeInterestOps(int ops) { key.interestOps(key.interestOps() & ~ops); } + public boolean isMute() { + return key.isValid() && (key.interestOps() & SelectionKey.OP_READ) == 0; + } } diff --git a/clients/src/main/java/org/apache/kafka/common/network/SSLTransportLayer.java b/clients/src/main/java/org/apache/kafka/common/network/SSLTransportLayer.java index 4ac215c..b39524c 100644 --- a/clients/src/main/java/org/apache/kafka/common/network/SSLTransportLayer.java +++ b/clients/src/main/java/org/apache/kafka/common/network/SSLTransportLayer.java @@ -83,7 +83,7 @@ public class SSLTransportLayer implements TransportLayer { handshakeStatus = sslEngine.getHandshakeStatus(); } - + @Override public boolean ready() { return handshakeComplete; } @@ -150,13 +150,15 @@ public class SSLTransportLayer implements TransportLayer { } socketChannel.socket().close(); socketChannel.close(); + key.attach(null); + key.cancel(); } /** * returns true if there are any pending contents in netWriteBuffer */ public boolean hasPendingWrites() { - return netWriteBuffer.hasRemaining(); + return netWriteBuffer.remaining() != 0; } /** @@ -226,8 +228,10 @@ public class SSLTransportLayer implements TransportLayer { } else if (handshakeResult.getStatus() == Status.CLOSED) { throw new EOFException(); } + log.trace("SSLHandshake NEED_WRAP handshakeStatus ", channelId, handshakeResult); //if handshake status is not NEED_UNWRAP or unable to flush netWriteBuffer contents //we will break here otherwise we can do need_unwrap in the same call. + if (handshakeStatus != HandshakeStatus.NEED_UNWRAP || (!write && !flush(netWriteBuffer))) { key.interestOps(key.interestOps() | SelectionKey.OP_WRITE); break; @@ -351,13 +355,13 @@ public class SSLTransportLayer implements TransportLayer { //clear the buffer if we have emptied it out on data netReadBuffer.clear(); } - + SSLEngineResult result; + boolean cont = false; + int read = 0; if (doRead) { - int read = socketChannel.read(netReadBuffer); + read = socketChannel.read(netReadBuffer); if (read == -1) throw new EOFException("EOF during handshake."); } - SSLEngineResult result; - boolean cont = false; do { //prepare the buffer with the incoming data netReadBuffer.flip(); @@ -371,7 +375,8 @@ public class SSLTransportLayer implements TransportLayer { cont = result.getStatus() == SSLEngineResult.Status.OK && handshakeStatus == HandshakeStatus.NEED_UNWRAP; log.trace("SSLHandshake handshakeUnwrap: handshakeStatus ", handshakeStatus); - } while (cont); + } while (netReadBuffer.position() != 0 && cont); + return result; } @@ -540,7 +545,6 @@ public class SSLTransportLayer implements TransportLayer { public long write(ByteBuffer[] srcs, int offset, int length) throws IOException { if ((offset < 0) || (length < 0) || (offset > srcs.length - length)) throw new IndexOutOfBoundsException(); - int totalWritten = 0; int i = offset; while (i < length) { @@ -550,7 +554,7 @@ public class SSLTransportLayer implements TransportLayer { totalWritten += written; } } - if (!srcs[i].hasRemaining()) { + if (!srcs[i].hasRemaining() && !hasPendingWrites()) { i++; } else { // if we are unable to write the current buffer to socketChannel we should break, @@ -599,10 +603,12 @@ public class SSLTransportLayer implements TransportLayer { * @param ops SelectionKey interestOps */ public void addInterestOps(int ops) { - if (handshakeComplete) - key.interestOps(key.interestOps() | ops); - else if (!key.isValid()) + if (!key.isValid()) throw new 
CancelledKeyException(); + else if (!handshakeComplete) + throw new IllegalStateException("handshake is not completed"); + + key.interestOps(key.interestOps() | ops); } /** @@ -610,10 +616,12 @@ public class SSLTransportLayer implements TransportLayer { * @param ops SelectionKey interestOps */ public void removeInterestOps(int ops) { - if (handshakeComplete) - key.interestOps(key.interestOps() & ~ops); - else if (!key.isValid()) + if (!key.isValid()) throw new CancelledKeyException(); + else if (!handshakeComplete) + throw new IllegalStateException("handshake is not completed"); + + key.interestOps(key.interestOps() & ~ops); } @@ -658,4 +666,8 @@ public class SSLTransportLayer implements TransportLayer { log.debug("SSLEngine.closeInBound() raised an exception.", e); } } + + public boolean isMute() { + return key.isValid() && (key.interestOps() & SelectionKey.OP_READ) == 0; + } } diff --git a/clients/src/main/java/org/apache/kafka/common/network/Selector.java b/clients/src/main/java/org/apache/kafka/common/network/Selector.java index 95f48f7..fcfb1d5 100644 --- a/clients/src/main/java/org/apache/kafka/common/network/Selector.java +++ b/clients/src/main/java/org/apache/kafka/common/network/Selector.java @@ -72,6 +72,7 @@ public class Selector implements Selectable { private final Map channels; private final List completedSends; private final List completedReceives; + private final Map> stagedReceives; private final List disconnected; private final List connected; private final List failedSends; @@ -105,6 +106,7 @@ public class Selector implements Selectable { this.channels = new HashMap(); this.completedSends = new ArrayList(); this.completedReceives = new ArrayList(); + this.stagedReceives = new HashMap>(); this.connected = new ArrayList(); this.disconnected = new ArrayList(); this.failedSends = new ArrayList(); @@ -242,7 +244,8 @@ public class Selector implements Selectable { @Override public void poll(long timeout) throws IOException { clear(); - + if (this.stagedReceives.size() > 0) + timeout = 0; /* check ready keys */ long startSelect = time.nanoseconds(); int readyKeys = select(timeout); @@ -271,18 +274,17 @@ public class Selector implements Selectable { } /* if channel is not ready finish prepare */ - if (channel.isConnected() && !channel.ready()) { + if (channel.isConnected() && !channel.ready()) channel.prepare(); - } /* if channel is ready read from any connections that have readable data */ if (channel.ready() && key.isReadable()) { NetworkReceive networkReceive; try { - if ((networkReceive = channel.read()) != null) { - this.completedReceives.add(networkReceive); - this.sensors.recordBytesReceived(channel.id(), networkReceive.payload().limit()); + while ((networkReceive = channel.read()) != null) { + addToStagedReceives(channel, networkReceive); } + addToCompletedReceives(channel); } catch (InvalidReceiveException e) { log.error("Invalid data received from " + channel.id() + " closing connection", e); close(channel); @@ -315,12 +317,16 @@ public class Selector implements Selectable { this.disconnected.add(channel.id()); } } + } else { + addToCompletedReceives(); } long endIo = time.nanoseconds(); this.sensors.ioTime.record(endIo - endSelect, time.milliseconds()); maybeCloseOldestConnection(); } + + @Override public List completedSends() { return this.completedSends; @@ -441,13 +447,16 @@ public class Selector implements Selectable { } catch (IOException e) { log.error("Exception closing connection to node {}:", channel.id(), e); } + this.stagedReceives.remove(channel); 
this.channels.remove(channel.id()); + this.lruConnections.remove(channel.id()); this.sensors.connectionClosed.record(); } /** * check if channel is ready */ + @Override public boolean isChannelReady(String id) { KafkaChannel channel = this.channels.get(id); return channel.ready(); @@ -470,6 +479,51 @@ public class Selector implements Selectable { return (KafkaChannel) key.attachment(); } + /** + * adds a receive to staged receieves + */ + private void addToStagedReceives(KafkaChannel channel, NetworkReceive receive) { + if (!stagedReceives.containsKey(channel)) + stagedReceives.put(channel, new ArrayDeque()); + + Deque deque = stagedReceives.get(channel); + deque.add(receive); + } + + /** + * checks if there are any staged receives and adds to completedReceives + */ + private void addToCompletedReceives() { + if (this.stagedReceives.size() > 0) { + Iterator>> iter = this.stagedReceives.entrySet().iterator(); + while (iter.hasNext()) { + Map.Entry> entry = iter.next(); + KafkaChannel channel = entry.getKey(); + if (!channel.hasSend() && !channel.isMute()) { + Deque deque = entry.getValue(); + NetworkReceive networkReceive = deque.poll(); + this.completedReceives.add(networkReceive); + this.sensors.recordBytesReceived(channel.id(), networkReceive.payload().limit()); + if (deque.size() == 0) + iter.remove(); + } + } + } + } + + /** + * checks if there are any staged receives and adds to completedReceives + */ + private void addToCompletedReceives(KafkaChannel channel) { + Deque deque = this.stagedReceives.get(channel); + if (!channel.hasSend() && deque != null) { + NetworkReceive networkReceive = deque.poll(); + this.completedReceives.add(networkReceive); + this.sensors.recordBytesReceived(channel.id(), networkReceive.payload().limit()); + if (deque.size() == 0) + this.stagedReceives.remove(channel); + } + } private class SelectorMetrics { private final Metrics metrics; diff --git a/clients/src/main/java/org/apache/kafka/common/network/TransportLayer.java b/clients/src/main/java/org/apache/kafka/common/network/TransportLayer.java index 8ff8668..e9158aa 100644 --- a/clients/src/main/java/org/apache/kafka/common/network/TransportLayer.java +++ b/clients/src/main/java/org/apache/kafka/common/network/TransportLayer.java @@ -82,4 +82,5 @@ public interface TransportLayer extends ScatteringByteChannel, GatheringByteChan void removeInterestOps(int ops); + boolean isMute(); } diff --git a/clients/src/test/java/org/apache/kafka/common/network/SSLSelectorTest.java b/clients/src/test/java/org/apache/kafka/common/network/SSLSelectorTest.java index e25503b..df6279b 100644 --- a/clients/src/test/java/org/apache/kafka/common/network/SSLSelectorTest.java +++ b/clients/src/test/java/org/apache/kafka/common/network/SSLSelectorTest.java @@ -169,6 +169,54 @@ public class SSLSelectorTest { } + /** + * Tests that SSL renegotiation initiated by the server are handled correctly by the client + * @throws Exception + */ + @Test + public void testRenegotiation() throws Exception { + int reqs = 500; + String node = "0"; + // create connections + InetSocketAddress addr = new InetSocketAddress("localhost", server.port); + selector.connect(node, addr, BUFFER_SIZE, BUFFER_SIZE); + + // send echo requests and receive responses + int requests = 0; + int responses = 0; + int renegotiates = 0; + while (!selector.isChannelReady(node)) { + selector.poll(1000L); + } + selector.send(createSend(node, node + "-" + 0)); + requests++; + + // loop until we complete all requests + while (responses < reqs) { + selector.poll(0L); + if 
(responses >= 100 && renegotiates == 0) { + renegotiates++; + server.renegotiate(); + } + assertEquals("No disconnects should have occurred.", 0, selector.disconnected().size()); + + // handle any responses we may have gotten + for (NetworkReceive receive : selector.completedReceives()) { + String[] pieces = asString(receive).split("-"); + assertEquals("Should be in the form 'conn-counter'", 2, pieces.length); + assertEquals("Check the source", receive.source(), pieces[0]); + assertEquals("Check that the receive has kindly been rewound", 0, receive.payload().position()); + assertEquals("Check the request counter", responses, Integer.parseInt(pieces[1])); + responses++; + } + + // prepare new sends for the next round + for (int i = 0; i < selector.completedSends().size() && requests < reqs && selector.isChannelReady(node); i++, requests++) { + selector.send(createSend(node, node + "-" + requests)); + } + } + } + private String blockingRequest(String node, String s) throws IOException { selector.send(createSend(node, s)); while (true) { diff --git a/core/src/main/scala/kafka/api/FetchResponse.scala b/core/src/main/scala/kafka/api/FetchResponse.scala index 0b6b33a..b9efec2 100644 --- a/core/src/main/scala/kafka/api/FetchResponse.scala +++ b/core/src/main/scala/kafka/api/FetchResponse.scala @@ -24,6 +24,7 @@ import kafka.common.{TopicAndPartition, ErrorMapping} import kafka.message.{MessageSet, ByteBufferMessageSet} import kafka.api.ApiUtils._ import org.apache.kafka.common.KafkaException +import org.apache.kafka.common.network.TransportLayer import org.apache.kafka.common.network.Send import org.apache.kafka.common.network.MultiSend @@ -56,7 +57,7 @@ class PartitionDataSend(val partitionId: Int, val partitionData: FetchResponsePartitionData) extends Send { private val messageSize = partitionData.messages.sizeInBytes private var messagesSentSize = 0 - + private var pending = false private val buffer = ByteBuffer.allocate( 4 /** partitionId **/ + FetchResponsePartitionData.headerSize) buffer.putInt(partitionId) buffer.putShort(partitionData.error) @@ -64,7 +65,7 @@ class PartitionDataSend(val partitionId: Int, buffer.putInt(partitionData.messages.sizeInBytes) buffer.rewind() - override def completed = !buffer.hasRemaining && messagesSentSize >= messageSize + override def completed = !buffer.hasRemaining && messagesSentSize >= messageSize && !pending override def destination: String = "" @@ -77,6 +78,8 @@ class PartitionDataSend(val partitionId: Int, messagesSentSize += bytesSent written += bytesSent } + if (channel.isInstanceOf[TransportLayer]) + pending = channel.asInstanceOf[TransportLayer].hasPendingWrites written } @@ -111,7 +114,9 @@ class TopicDataSend(val dest: String, val topicData: TopicData) extends Send { private var sent = 0L - override def completed: Boolean = sent >= size + private var pending = false + + override def completed: Boolean = sent >= size && !pending override def destination: String = dest @@ -135,6 +140,10 @@ class TopicDataSend(val dest: String, val topicData: TopicData) extends Send { if(!buffer.hasRemaining && !sends.completed) { written += sends.writeTo(channel) } + + if (channel.isInstanceOf[TransportLayer]) + pending = channel.asInstanceOf[TransportLayer].hasPendingWrites + sent += written written } @@ -214,9 +223,11 @@ class FetchResponseSend(val dest: String, val fetchResponse: FetchResponse) exte private var sent = 0L + private var pending = false + override def size = 4 /* for size byte */ + payloadSize - override def completed = sent >= size + override def 
completed = sent >= size && !pending override def destination = dest @@ -242,7 +253,10 @@ class FetchResponseSend(val dest: String, val fetchResponse: FetchResponse) exte written += sends.writeTo(channel) } sent += written + + if (channel.isInstanceOf[TransportLayer]) + pending = channel.asInstanceOf[TransportLayer].hasPendingWrites + written } } - diff --git a/core/src/main/scala/kafka/network/SocketServer.scala b/core/src/main/scala/kafka/network/SocketServer.scala index 272ca27..f80f7bb 100644 --- a/core/src/main/scala/kafka/network/SocketServer.scala +++ b/core/src/main/scala/kafka/network/SocketServer.scala @@ -191,7 +191,7 @@ private[kafka] abstract class AbstractServerThread(connectionQuotas: ConnectionQ def close(channel: SocketChannel) { if(channel != null) { - println("Closing connection from " + channel.socket.getRemoteSocketAddress()) + debug("Closing connection from " + channel.socket.getRemoteSocketAddress()) connectionQuotas.dec(channel.socket.getInetAddress) swallowError(channel.socket().close()) swallowError(channel.close()) diff --git a/core/src/test/scala/integration/kafka/api/ConsumerBounceTest.scala b/core/src/test/scala/integration/kafka/api/ConsumerBounceTest.scala index b0750fa..8b10b02 100644 --- a/core/src/test/scala/integration/kafka/api/ConsumerBounceTest.scala +++ b/core/src/test/scala/integration/kafka/api/ConsumerBounceTest.scala @@ -96,6 +96,7 @@ class ConsumerBounceTest extends IntegrationTestHarness with Logging { def testSeekAndCommitWithBrokerFailures() = seekAndCommitWithBrokerFailures(5) def seekAndCommitWithBrokerFailures(numIters: Int) { + println("testSeekAndCommitWithBrokerFailures") val numRecords = 1000 sendRecords(numRecords) this.producers.foreach(_.close) diff --git a/core/src/test/scala/integration/kafka/api/SSLProducerSendTest.scala b/core/src/test/scala/integration/kafka/api/SSLProducerSendTest.scala index 3a9239c..8d72fab 100644 --- a/core/src/test/scala/integration/kafka/api/SSLProducerSendTest.scala +++ b/core/src/test/scala/integration/kafka/api/SSLProducerSendTest.scala @@ -47,7 +47,7 @@ class SSLProducerSendTest extends JUnit3Suite with KafkaServerTestHarness { private var consumer2: SimpleConsumer = null private val topic = "topic" - private val numRecords = 100 + private val numRecords = 10 override def setUp() { super.setUp() @@ -135,55 +135,6 @@ class SSLProducerSendTest extends JUnit3Suite with KafkaServerTestHarness { } } - @Test - def testSerializer() { - // send a record with a wrong type should receive a serialization exception - try { - val producer = createNewProducerWithWrongSerializer(brokerList) - val record5 = new ProducerRecord[Array[Byte],Array[Byte]](topic, new Integer(0), "key".getBytes, "value".getBytes) - producer.send(record5) - fail("Should have gotten a SerializationException") - } catch { - case se: SerializationException => // this is ok - } - - try { - createNewProducerWithNoSerializer(brokerList) - fail("Instantiating a producer without specifying a serializer should cause a ConfigException") - } catch { - case ce : ConfigException => // this is ok - } - - // create a producer with explicit serializers should succeed - createNewProducerWithExplicitSerializer(brokerList) - } - - private def createNewProducerWithWrongSerializer(brokerList: String) : KafkaProducer[Array[Byte],Array[Byte]] = { - import org.apache.kafka.clients.producer.ProducerConfig - - val producerProps = new Properties() - producerProps.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, brokerList) - 
producerProps.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringSerializer") - producerProps.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringSerializer") - return new KafkaProducer[Array[Byte],Array[Byte]](producerProps) - } - - private def createNewProducerWithNoSerializer(brokerList: String) : KafkaProducer[Array[Byte],Array[Byte]] = { - import org.apache.kafka.clients.producer.ProducerConfig - - val producerProps = new Properties() - producerProps.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, brokerList) - return new KafkaProducer[Array[Byte],Array[Byte]](producerProps) - } - - private def createNewProducerWithExplicitSerializer(brokerList: String) : KafkaProducer[Array[Byte],Array[Byte]] = { - import org.apache.kafka.clients.producer.ProducerConfig - - val producerProps = new Properties() - producerProps.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, brokerList) - return new KafkaProducer[Array[Byte],Array[Byte]](producerProps, new ByteArraySerializer, new ByteArraySerializer) - } - /** * testClose checks the closing behavior * @@ -191,8 +142,7 @@ class SSLProducerSendTest extends JUnit3Suite with KafkaServerTestHarness { */ @Test def testClose() { - var producer = TestUtils.createNewProducer(brokerList) - + var producer = TestUtils.createNewProducer(TestUtils.getSSLBrokerListStrFromServers(servers), enableSSL=true, trustStoreFile=Some(trustStoreFile)) try { // create topic TestUtils.createTopic(zkClient, topic, 1, 2, servers) @@ -227,8 +177,7 @@ class SSLProducerSendTest extends JUnit3Suite with KafkaServerTestHarness { */ @Test def testSendToPartition() { - var producer = TestUtils.createNewProducer(brokerList) - + var producer = TestUtils.createNewProducer(TestUtils.getSSLBrokerListStrFromServers(servers), enableSSL=true, trustStoreFile=Some(trustStoreFile)) try { // create topic val leaders = TestUtils.createTopic(zkClient, topic, 2, 2, servers) @@ -282,7 +231,7 @@ class SSLProducerSendTest extends JUnit3Suite with KafkaServerTestHarness { */ @Test def testAutoCreateTopic() { - var producer = TestUtils.createNewProducer(brokerList, retries = 5) + var producer = TestUtils.createNewProducer(TestUtils.getSSLBrokerListStrFromServers(servers), enableSSL=true, trustStoreFile=Some(trustStoreFile), retries = 5) try { // Send a message to auto-create the topic @@ -300,26 +249,6 @@ class SSLProducerSendTest extends JUnit3Suite with KafkaServerTestHarness { } } - /** - * Test that flush immediately sends all accumulated requests. - */ - @Test - def testFlush() { - var producer = TestUtils.createNewProducer(brokerList, lingerMs = Long.MaxValue) - try { - TestUtils.createTopic(zkClient, topic, 2, 2, servers) - val record = new ProducerRecord[Array[Byte], Array[Byte]](topic, "value".getBytes) - for(i <- 0 until 50) { - val responses = (0 until numRecords) map (i => producer.send(record)) - assertTrue("No request is complete.", responses.forall(!_.isDone())) - producer.flush() - assertTrue("All requests are complete.", responses.forall(_.isDone())) - } - } finally { - if (producer != null) - producer.close() - } - } /** * Test close with zero timeout from caller thread @@ -339,7 +268,7 @@ class SSLProducerSendTest extends JUnit3Suite with KafkaServerTestHarness { // Test closing from caller thread. 
for(i <- 0 until 50) { - producer = TestUtils.createNewProducer(brokerList, lingerMs = Long.MaxValue) + producer = TestUtils.createNewProducer(TestUtils.getSSLBrokerListStrFromServers(servers), enableSSL=true, trustStoreFile=Some(trustStoreFile), lingerMs = Long.MaxValue) val responses = (0 until numRecords) map (i => producer.send(record0)) assertTrue("No request is complete.", responses.forall(!_.isDone())) producer.close(0, TimeUnit.MILLISECONDS) @@ -393,12 +322,14 @@ class SSLProducerSendTest extends JUnit3Suite with KafkaServerTestHarness { producer.close(Long.MaxValue, TimeUnit.MICROSECONDS) } } + for(i <- 0 until 50) { - producer = TestUtils.createNewProducer(brokerList, lingerMs = Long.MaxValue) + //producer = TestUtils.createNewProducer(brokerList, lingerMs = Long.MaxValue) + producer = TestUtils.createNewProducer(TestUtils.getSSLBrokerListStrFromServers(servers), enableSSL=true, trustStoreFile=Some(trustStoreFile), lingerMs = Long.MaxValue) // send message to partition 0 var responses = (0 until numRecords) map (i => producer.send(record0)) // send message to partition 1 - responses ++= ((0 until numRecords) map (i => producer.send(record1, new CloseCallback(producer)))) + responses ++= ((0 until numRecords) map (i => producer.send(record1))) assertTrue("No request is complete.", responses.forall(!_.isDone())) // flush the messages. producer.flush() @@ -415,7 +346,7 @@ class SSLProducerSendTest extends JUnit3Suite with KafkaServerTestHarness { consumer2.fetch(new FetchRequestBuilder().addFetch(topic, 1, 0, Int.MaxValue).build()) } val expectedNumRecords = (i + 1) * numRecords - assertEquals("Fetch response to partition 0 should have %d messages.".format(expectedNumRecords), + assertEquals("Fetch response to partition 1 should have %d messages.".format(expectedNumRecords), expectedNumRecords, fetchResponse0.messageSet(topic, 0).size) assertEquals("Fetch response to partition 1 should have %d messages.".format(expectedNumRecords), expectedNumRecords, fetchResponse1.messageSet(topic, 1).size) -- 2.4.6 From cf63221bc43104ecbe960f356340f2bde2411d97 Mon Sep 17 00:00:00 2001 From: Sriharsha Chintalapani Date: Sat, 25 Jul 2015 10:42:55 -0700 Subject: [PATCH 27/30] KAFKA-1690. Addressing reviews. --- .../kafka/common/network/PlainTextTransportLayer.java | 9 +++++++-- .../org/apache/kafka/common/network/SSLTransportLayer.java | 14 +++++++++----- .../java/org/apache/kafka/common/network/Selector.java | 4 ++-- core/src/main/scala/kafka/network/SocketServer.scala | 2 +- 4 files changed, 19 insertions(+), 10 deletions(-) diff --git a/clients/src/main/java/org/apache/kafka/common/network/PlainTextTransportLayer.java b/clients/src/main/java/org/apache/kafka/common/network/PlainTextTransportLayer.java index cc281db..c445839 100644 --- a/clients/src/main/java/org/apache/kafka/common/network/PlainTextTransportLayer.java +++ b/clients/src/main/java/org/apache/kafka/common/network/PlainTextTransportLayer.java @@ -96,6 +96,7 @@ public class PlainTextTransportLayer implements TransportLayer { * implementation * @throws IOException */ + @Override public void handshake() throws IOException {} /** @@ -173,11 +174,11 @@ public class PlainTextTransportLayer implements TransportLayer { return socketChannel.write(srcs, offset, length); } - /** * always returns false as there will be not be any - * pending writes + * pending writes since we directly write to socketChannel. 
*/ + @Override public boolean hasPendingWrites() { return false; } @@ -185,6 +186,7 @@ public class PlainTextTransportLayer implements TransportLayer { /** * Returns ANONYMOUS as Principal. */ + @Override public Principal peerPrincipal() throws IOException { return principal; } @@ -193,6 +195,7 @@ public class PlainTextTransportLayer implements TransportLayer { * Adds the interestOps to selectionKey. * @param interestOps */ + @Override public void addInterestOps(int ops) { key.interestOps(key.interestOps() | ops); @@ -202,10 +205,12 @@ public class PlainTextTransportLayer implements TransportLayer { * Removes the interestOps from selectionKey. * @param interestOps */ + @Override public void removeInterestOps(int ops) { key.interestOps(key.interestOps() & ~ops); } + @Override public boolean isMute() { return key.isValid() && (key.interestOps() & SelectionKey.OP_READ) == 0; } diff --git a/clients/src/main/java/org/apache/kafka/common/network/SSLTransportLayer.java b/clients/src/main/java/org/apache/kafka/common/network/SSLTransportLayer.java index b39524c..d805447 100644 --- a/clients/src/main/java/org/apache/kafka/common/network/SSLTransportLayer.java +++ b/clients/src/main/java/org/apache/kafka/common/network/SSLTransportLayer.java @@ -157,6 +157,7 @@ public class SSLTransportLayer implements TransportLayer { /** * returns true if there are any pending contents in netWriteBuffer */ + @Override public boolean hasPendingWrites() { return netWriteBuffer.remaining() != 0; } @@ -198,6 +199,7 @@ public class SSLTransportLayer implements TransportLayer { * * @throws IOException */ + @Override public void handshake() throws IOException { boolean read = key.isReadable(); boolean write = key.isWritable(); @@ -231,8 +233,7 @@ public class SSLTransportLayer implements TransportLayer { log.trace("SSLHandshake NEED_WRAP handshakeStatus ", channelId, handshakeResult); //if handshake status is not NEED_UNWRAP or unable to flush netWriteBuffer contents //we will break here otherwise we can do need_unwrap in the same call. - - if (handshakeStatus != HandshakeStatus.NEED_UNWRAP || (!write && !flush(netWriteBuffer))) { + if (handshakeStatus != HandshakeStatus.NEED_UNWRAP || (!write && !flush(netWriteBuffer))) { key.interestOps(key.interestOps() | SelectionKey.OP_WRITE); break; } @@ -309,9 +310,11 @@ public class SSLTransportLayer implements TransportLayer { log.trace("SSLHandshake FINISHED", channelId); //we are complete if we have delivered the last package handshakeComplete = !netWriteBuffer.hasRemaining(); - //set interestOps if we are complete, otherwise we still have data to write + //remove OP_WRITE if we are complete, otherwise we still have data to write if (!handshakeComplete) key.interestOps(key.interestOps() | SelectionKey.OP_WRITE); + else + key.interestOps(key.interestOps() & ~SelectionKey.OP_WRITE); } else { throw new IOException("NOT_HANDSHAKING during handshake"); } @@ -381,8 +384,6 @@ public class SSLTransportLayer implements TransportLayer { } - - /** * Reads a sequence of bytes from this channel into the given buffer. 
* @@ -602,6 +603,7 @@ public class SSLTransportLayer implements TransportLayer { * Adds interestOps to SelectionKey of the TransportLayer * @param ops SelectionKey interestOps */ + @Override public void addInterestOps(int ops) { if (!key.isValid()) throw new CancelledKeyException(); @@ -615,6 +617,7 @@ public class SSLTransportLayer implements TransportLayer { * removes interestOps to SelectionKey of the TransportLayer * @param ops SelectionKey interestOps */ + @Override public void removeInterestOps(int ops) { if (!key.isValid()) throw new CancelledKeyException(); @@ -667,6 +670,7 @@ public class SSLTransportLayer implements TransportLayer { } } + @Override public boolean isMute() { return key.isValid() && (key.interestOps() & SelectionKey.OP_READ) == 0; } diff --git a/clients/src/main/java/org/apache/kafka/common/network/Selector.java b/clients/src/main/java/org/apache/kafka/common/network/Selector.java index fcfb1d5..dae3871 100644 --- a/clients/src/main/java/org/apache/kafka/common/network/Selector.java +++ b/clients/src/main/java/org/apache/kafka/common/network/Selector.java @@ -217,9 +217,9 @@ public class Selector implements Selectable { */ public void send(Send send) { KafkaChannel channel = channelForId(send.destination()); - if (channel == null) { + if (channel == null) throw new IllegalStateException("channel is not connected"); - } + try { channel.setSend(send); } catch (CancelledKeyException e) { diff --git a/core/src/main/scala/kafka/network/SocketServer.scala b/core/src/main/scala/kafka/network/SocketServer.scala index f80f7bb..a40c715 100644 --- a/core/src/main/scala/kafka/network/SocketServer.scala +++ b/core/src/main/scala/kafka/network/SocketServer.scala @@ -64,7 +64,7 @@ class SocketServer(val config: KafkaConfig, val metrics: Metrics, val time: Time this.logIdent = "[Socket Server on Broker " + config.brokerId + "], " val requestChannel = new RequestChannel(totalProcessorThreads, maxQueuedRequests) - val processors = new Array[Processor](totalProcessorThreads) + private val processors = new Array[Processor](totalProcessorThreads) private[network] var acceptors = mutable.Map[EndPoint,Acceptor]() -- 2.4.6 From 4a30d1e2522f5da26ad5f879a1632b18dcfdca35 Mon Sep 17 00:00:00 2001 From: Sriharsha Chintalapani Date: Sat, 1 Aug 2015 22:03:39 -0700 Subject: [PATCH 28/30] KAFKA-1690. Addressing reviews. 
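One of the fixes below hardens the SSL read path: a return value of -1 from the underlying socket read now raises EOFException instead of being treated like an empty read. A self-contained sketch of that check (class and method names are illustrative; only the EOFException behaviour mirrors the patch):

    import java.io.EOFException;
    import java.io.IOException;
    import java.nio.ByteBuffer;
    import java.nio.channels.ReadableByteChannel;

    // Sketch: a network read of -1 means the peer closed the connection;
    // surfacing it as EOF keeps the caller from mistaking it for "no data".
    final class EofAwareReadSketch {
        static int fillOrFail(ReadableByteChannel channel, ByteBuffer netReadBuffer) throws IOException {
            int netread = channel.read(netReadBuffer);
            if (netread < 0)
                throw new EOFException("EOF during read");
            return netread;
        }
    }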
--- .../main/java/org/apache/kafka/clients/ClientUtils.java | 7 +++---- .../java/org/apache/kafka/common/config/SSLConfigs.java | 4 ---- .../org/apache/kafka/common/network/ByteBufferSend.java | 4 ++-- .../apache/kafka/common/network/SSLTransportLayer.java | 6 ++++-- .../java/org/apache/kafka/common/network/Selector.java | 2 +- .../test/java/org/apache/kafka/test/TestSSLUtils.java | 4 ++-- core/src/main/scala/kafka/network/SocketServer.scala | 9 ++++----- .../scala/integration/kafka/api/ConsumerBounceTest.scala | 1 - .../scala/integration/kafka/api/SSLConsumerTest.scala | 16 ++++++++-------- .../integration/kafka/api/SSLProducerSendTest.scala | 6 +++--- core/src/test/scala/unit/kafka/utils/TestUtils.scala | 2 +- 11 files changed, 28 insertions(+), 33 deletions(-) diff --git a/clients/src/main/java/org/apache/kafka/clients/ClientUtils.java b/clients/src/main/java/org/apache/kafka/clients/ClientUtils.java index 33cb967..c02044e 100644 --- a/clients/src/main/java/org/apache/kafka/clients/ClientUtils.java +++ b/clients/src/main/java/org/apache/kafka/clients/ClientUtils.java @@ -77,13 +77,12 @@ public class ClientUtils { ChannelBuilder channelBuilder = null; SecurityProtocol securityProtocol = SecurityProtocol.valueOf((String) configs.get(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG)); - if (securityProtocol == SecurityProtocol.SSL) { + if (securityProtocol == SecurityProtocol.SSL) channelBuilder = new SSLChannelBuilder(SSLFactory.Mode.CLIENT); - } else if (securityProtocol == SecurityProtocol.PLAINTEXT) { + else if (securityProtocol == SecurityProtocol.PLAINTEXT) channelBuilder = new PlainTextChannelBuilder(); - } else { + else throw new ConfigException("Invalid SecurityProtocol " + CommonClientConfigs.SECURITY_PROTOCOL_CONFIG); - } channelBuilder.configure(configs); return channelBuilder; diff --git a/clients/src/main/java/org/apache/kafka/common/config/SSLConfigs.java b/clients/src/main/java/org/apache/kafka/common/config/SSLConfigs.java index a02cf0c..dd7b71a 100644 --- a/clients/src/main/java/org/apache/kafka/common/config/SSLConfigs.java +++ b/clients/src/main/java/org/apache/kafka/common/config/SSLConfigs.java @@ -99,8 +99,4 @@ public class SSLConfigs { + "Default value is false"; public static final Boolean DEFAULT_SSL_NEED_CLIENT_AUTH = false; - public static final String SSL_WANT_CLIENT_AUTH_CONFIG = "ssl.want.client.auth"; - public static final String SSL_WANT_CLIENT_AUTH_DOC = "If set to true kafka broker requests for client authentication. Clients without any certificates can still be able to connect using SSL."; - public static final Boolean DEFAULT_SSL_WANT_CLIENT_AUTH = false; - } diff --git a/clients/src/main/java/org/apache/kafka/common/network/ByteBufferSend.java b/clients/src/main/java/org/apache/kafka/common/network/ByteBufferSend.java index f409874..d7357b2 100644 --- a/clients/src/main/java/org/apache/kafka/common/network/ByteBufferSend.java +++ b/clients/src/main/java/org/apache/kafka/common/network/ByteBufferSend.java @@ -61,9 +61,9 @@ public class ByteBufferSend implements Send { // This is temporary workaround. As Send , Receive interfaces are being used by BlockingChannel. // Once BlockingChannel is removed we can make Send, Receive to work with transportLayer rather than // GatheringByteChannel or ScatteringByteChannel. 
- if (channel instanceof TransportLayer) { + if (channel instanceof TransportLayer) pending = ((TransportLayer) channel).hasPendingWrites(); - } + return written; } } diff --git a/clients/src/main/java/org/apache/kafka/common/network/SSLTransportLayer.java b/clients/src/main/java/org/apache/kafka/common/network/SSLTransportLayer.java index d805447..5306051 100644 --- a/clients/src/main/java/org/apache/kafka/common/network/SSLTransportLayer.java +++ b/clients/src/main/java/org/apache/kafka/common/network/SSLTransportLayer.java @@ -407,6 +407,7 @@ public class SSLTransportLayer implements TransportLayer { if (netReadBuffer.remaining() > 0) { int netread = socketChannel.read(netReadBuffer); if (netread == 0) return netread; + else if (netread < 0) throw new EOFException("EOF during read"); } do { netReadBuffer.flip(); @@ -479,9 +480,10 @@ public class SSLTransportLayer implements TransportLayer { while (i < length) { if (dsts[i].hasRemaining()) { int read = read(dsts[i]); - if (read > 0) { + if (read > 0) totalRead += read; - } + else + break; } if (!dsts[i].hasRemaining()) { i++; diff --git a/clients/src/main/java/org/apache/kafka/common/network/Selector.java b/clients/src/main/java/org/apache/kafka/common/network/Selector.java index dae3871..327c5ca 100644 --- a/clients/src/main/java/org/apache/kafka/common/network/Selector.java +++ b/clients/src/main/java/org/apache/kafka/common/network/Selector.java @@ -458,7 +458,7 @@ public class Selector implements Selectable { */ @Override public boolean isChannelReady(String id) { - KafkaChannel channel = this.channels.get(id); + KafkaChannel channel = channelForId(id); return channel.ready(); } diff --git a/clients/src/test/java/org/apache/kafka/test/TestSSLUtils.java b/clients/src/test/java/org/apache/kafka/test/TestSSLUtils.java index 08cd598..601e05d 100644 --- a/clients/src/test/java/org/apache/kafka/test/TestSSLUtils.java +++ b/clients/src/test/java/org/apache/kafka/test/TestSSLUtils.java @@ -178,7 +178,7 @@ public class TestSSLUtils { } public static Map createSSLConfig(SSLFactory.Mode mode, File keyStoreFile, String password, String keyPassword, - File trustStoreFile, String trustStorePassword, boolean useClientCert) { + File trustStoreFile, String trustStorePassword) { Map sslConfigs = new HashMap(); sslConfigs.put(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG, "SSL"); // kafka security protocol sslConfigs.put(SSLConfigs.SSL_PROTOCOL_CONFIG, "TLSv1.2"); // protocol to create SSLContext @@ -236,7 +236,7 @@ public class TestSSLUtils { } Map sslConfig = createSSLConfig(mode, keyStoreFile, password, - password, trustStoreFile, trustStorePassword, useClientCert); + password, trustStoreFile, trustStorePassword); return sslConfig; } diff --git a/core/src/main/scala/kafka/network/SocketServer.scala b/core/src/main/scala/kafka/network/SocketServer.scala index 10fc5e6..0b44d57 100644 --- a/core/src/main/scala/kafka/network/SocketServer.scala +++ b/core/src/main/scala/kafka/network/SocketServer.scala @@ -498,11 +498,10 @@ private[kafka] class Processor(val id: Int, } } - private def createChannelBuilder():ChannelBuilder = { - var channelBuilder:ChannelBuilder = new PlainTextChannelBuilder() - if (protocol == SecurityProtocol.SSL) { - channelBuilder = new SSLChannelBuilder(SSLFactory.Mode.SERVER) - } + private def createChannelBuilder(): ChannelBuilder = { + val channelBuilder:ChannelBuilder = if (protocol == SecurityProtocol.SSL) new SSLChannelBuilder(SSLFactory.Mode.SERVER) + else new PlainTextChannelBuilder() + channelBuilder.configure(channelConfigs) 
channelBuilder } diff --git a/core/src/test/scala/integration/kafka/api/ConsumerBounceTest.scala b/core/src/test/scala/integration/kafka/api/ConsumerBounceTest.scala index 76a7044..d8eee52 100644 --- a/core/src/test/scala/integration/kafka/api/ConsumerBounceTest.scala +++ b/core/src/test/scala/integration/kafka/api/ConsumerBounceTest.scala @@ -96,7 +96,6 @@ class ConsumerBounceTest extends IntegrationTestHarness with Logging { def testSeekAndCommitWithBrokerFailures() = seekAndCommitWithBrokerFailures(5) def seekAndCommitWithBrokerFailures(numIters: Int) { - println("testSeekAndCommitWithBrokerFailures") val numRecords = 1000 sendRecords(numRecords) this.producers.foreach(_.close) diff --git a/core/src/test/scala/integration/kafka/api/SSLConsumerTest.scala b/core/src/test/scala/integration/kafka/api/SSLConsumerTest.scala index cc342a9..0952b5f 100644 --- a/core/src/test/scala/integration/kafka/api/SSLConsumerTest.scala +++ b/core/src/test/scala/integration/kafka/api/SSLConsumerTest.scala @@ -60,8 +60,8 @@ class SSLConsumerTest extends KafkaServerTestHarness with Logging { overridingProps.put(KafkaConfig.OffsetsTopicPartitionsProp, "1") overridingProps.put(KafkaConfig.ConsumerMinSessionTimeoutMsProp, "100") // set small enough session timeout - var consumers = Buffer[KafkaConsumer[Array[Byte], Array[Byte]]]() - var producers = Buffer[KafkaProducer[Array[Byte], Array[Byte]]]() + val consumers = Buffer[KafkaConsumer[Array[Byte], Array[Byte]]]() + val producers = Buffer[KafkaProducer[Array[Byte], Array[Byte]]]() def generateConfigs() = TestUtils.createBrokerConfigs(numServers, zkConnect, false, enableSSL=true, trustStoreFile=Some(trustStoreFile)).map(KafkaConfig.fromProps(_, overridingProps)) @@ -86,12 +86,12 @@ class SSLConsumerTest extends KafkaServerTestHarness with Logging { consumerConfig.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, classOf[org.apache.kafka.common.serialization.ByteArrayDeserializer]) consumerConfig.put(ConsumerConfig.PARTITION_ASSIGNMENT_STRATEGY_CONFIG, "range") - for(i <- 0 until producerCount) + for (i <- 0 until producerCount) producers += TestUtils.createNewProducer(TestUtils.getSSLBrokerListStrFromServers(servers), acks = 1, enableSSL=true, trustStoreFile=Some(trustStoreFile)) - for(i <- 0 until consumerCount) + for (i <- 0 until consumerCount) consumers += TestUtils.createNewConsumer(TestUtils.getSSLBrokerListStrFromServers(servers), groupId = "my-test", partitionAssignmentStrategy= "range", @@ -210,12 +210,12 @@ class SSLConsumerTest extends KafkaServerTestHarness with Logging { trustStoreFile=Some(trustStoreFile)) consumer0.subscribe(topic) // the initial subscription should cause a callback execution - while(callback.callsToAssigned == 0) + while (callback.callsToAssigned == 0) consumer0.poll(50) // get metadata for the topic var parts = consumer0.partitionsFor(ConsumerCoordinator.OffsetsTopicName) - while(parts == null) + while (parts == null) parts = consumer0.partitionsFor(ConsumerCoordinator.OffsetsTopicName) assertEquals(1, parts.size) assertNotNull(parts(0).leader()) @@ -225,7 +225,7 @@ class SSLConsumerTest extends KafkaServerTestHarness with Logging { this.servers(coordinator).shutdown() // this should cause another callback execution - while(callback.callsToAssigned < 2) + while (callback.callsToAssigned < 2) consumer0.poll(50) assertEquals(2, callback.callsToAssigned) assertEquals(2, callback.callsToRevoked) @@ -262,7 +262,7 @@ class SSLConsumerTest extends KafkaServerTestHarness with Logging { for (record <- consumer.poll(50)) { 
records.add(record) } - if(iters > maxIters) + if (iters > maxIters) throw new IllegalStateException("Failed to consume the expected records after " + iters + " iterations.") iters += 1 } diff --git a/core/src/test/scala/integration/kafka/api/SSLProducerSendTest.scala b/core/src/test/scala/integration/kafka/api/SSLProducerSendTest.scala index 8d72fab..e5861ae 100644 --- a/core/src/test/scala/integration/kafka/api/SSLProducerSendTest.scala +++ b/core/src/test/scala/integration/kafka/api/SSLProducerSendTest.scala @@ -203,7 +203,7 @@ class SSLProducerSendTest extends JUnit3Suite with KafkaServerTestHarness { } // make sure the fetched messages also respect the partitioning and ordering - val fetchResponse1 = if(leader1.get == configs(0).brokerId) { + val fetchResponse1 = if (leader1.get == configs(0).brokerId) { consumer1.fetch(new FetchRequestBuilder().addFetch(topic, partition, 0, Int.MaxValue).build()) } else { consumer2.fetch(new FetchRequestBuilder().addFetch(topic, partition, 0, Int.MaxValue).build()) @@ -267,7 +267,7 @@ class SSLProducerSendTest extends JUnit3Suite with KafkaServerTestHarness { val record1 = new ProducerRecord[Array[Byte], Array[Byte]](topic, 1, null, "value".getBytes) // Test closing from caller thread. - for(i <- 0 until 50) { + for (i <- 0 until 50) { producer = TestUtils.createNewProducer(TestUtils.getSSLBrokerListStrFromServers(servers), enableSSL=true, trustStoreFile=Some(trustStoreFile), lingerMs = Long.MaxValue) val responses = (0 until numRecords) map (i => producer.send(record0)) assertTrue("No request is complete.", responses.forall(!_.isDone())) @@ -323,7 +323,7 @@ class SSLProducerSendTest extends JUnit3Suite with KafkaServerTestHarness { } } - for(i <- 0 until 50) { + for (i <- 0 until 50) { //producer = TestUtils.createNewProducer(brokerList, lingerMs = Long.MaxValue) producer = TestUtils.createNewProducer(TestUtils.getSSLBrokerListStrFromServers(servers), enableSSL=true, trustStoreFile=Some(trustStoreFile), lingerMs = Long.MaxValue) // send message to partition 0 diff --git a/core/src/test/scala/unit/kafka/utils/TestUtils.scala b/core/src/test/scala/unit/kafka/utils/TestUtils.scala index 85511a2..39dadc5 100755 --- a/core/src/test/scala/unit/kafka/utils/TestUtils.scala +++ b/core/src/test/scala/unit/kafka/utils/TestUtils.scala @@ -924,7 +924,7 @@ object TestUtils extends Logging { if (mode == SSLFactory.Mode.SERVER) sslConfigs = TestSSLUtils.createSSLConfig(true, true, mode, trustStoreFile.get, certAlias) else - sslConfigs = TestSSLUtils.createSSLConfig(false, false, mode, trustStoreFile.get, certAlias) + sslConfigs = TestSSLUtils.createSSLConfig(clientCert, false, mode, trustStoreFile.get, certAlias) val sslProps = new Properties() sslConfigs.foreach(kv => -- 2.4.6 From b87045ae836818f0cda9f2b3d321e9423c4855ff Mon Sep 17 00:00:00 2001 From: Sriharsha Chintalapani Date: Sun, 16 Aug 2015 17:45:11 -0700 Subject: [PATCH 29/30] KAFKA-1690. Add SSL support to broker, producer and consumer. 
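Alongside the Plaintext renames and the move of SSLFactory into common.security.ssl, AbstractConfig.values() is fixed to return a defensive copy instead of the internal map (the old code built a copy but still returned the original). A minimal sketch of the pattern, with illustrative names rather than the Kafka ones:

    import java.util.HashMap;
    import java.util.Map;

    // Defensive-copy sketch: callers get a snapshot they can mutate freely
    // without corrupting the config object's internal state.
    final class ConfigSnapshotSketch {
        private final Map<String, Object> values = new HashMap<String, Object>();

        Map<String, Object> values() {
            return new HashMap<String, Object>(values);
        }
    }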
--- build.gradle | 5 +- checkstyle/import-control.xml | 6 +- .../java/org/apache/kafka/clients/ClientUtils.java | 22 ++- .../org/apache/kafka/clients/NetworkClient.java | 9 +- .../apache/kafka/common/config/AbstractConfig.java | 4 +- .../kafka/common/network/ChannelBuilder.java | 6 +- .../apache/kafka/common/network/KafkaChannel.java | 2 +- .../common/network/PlainTextChannelBuilder.java | 58 ------ .../common/network/PlainTextTransportLayer.java | 217 --------------------- .../common/network/PlaintextChannelBuilder.java | 58 ++++++ .../common/network/PlaintextTransportLayer.java | 217 +++++++++++++++++++++ .../kafka/common/network/SSLChannelBuilder.java | 1 + .../apache/kafka/common/network/SSLFactory.java | 210 -------------------- .../kafka/common/network/SSLTransportLayer.java | 56 +++--- .../org/apache/kafka/common/network/Selector.java | 55 ++++-- .../kafka/common/security/ssl/SSLFactory.java | 210 ++++++++++++++++++++ .../apache/kafka/common/network/EchoServer.java | 1 + .../kafka/common/network/SSLFactoryTest.java | 60 ------ .../kafka/common/network/SSLSelectorTest.java | 1 + .../apache/kafka/common/network/SelectorTest.java | 2 +- .../kafka/common/security/ssl/SSLFactoryTest.java | 60 ++++++ .../java/org/apache/kafka/test/TestSSLUtils.java | 2 +- .../main/scala/kafka/network/SocketServer.scala | 9 +- .../integration/kafka/api/SSLConsumerTest.scala | 35 ---- .../kafka/api/SSLProducerSendTest.scala | 164 ++-------------- .../test/scala/unit/kafka/utils/TestUtils.scala | 2 +- 26 files changed, 670 insertions(+), 802 deletions(-) delete mode 100644 clients/src/main/java/org/apache/kafka/common/network/PlainTextChannelBuilder.java delete mode 100644 clients/src/main/java/org/apache/kafka/common/network/PlainTextTransportLayer.java create mode 100644 clients/src/main/java/org/apache/kafka/common/network/PlaintextChannelBuilder.java create mode 100644 clients/src/main/java/org/apache/kafka/common/network/PlaintextTransportLayer.java delete mode 100644 clients/src/main/java/org/apache/kafka/common/network/SSLFactory.java create mode 100644 clients/src/main/java/org/apache/kafka/common/security/ssl/SSLFactory.java delete mode 100644 clients/src/test/java/org/apache/kafka/common/network/SSLFactoryTest.java create mode 100644 clients/src/test/java/org/apache/kafka/common/security/ssl/SSLFactoryTest.java diff --git a/build.gradle b/build.gradle index e3c3a85..04ba6c3 100644 --- a/build.gradle +++ b/build.gradle @@ -242,12 +242,9 @@ project(':core') { testCompile 'org.easymock:easymock:3.0' testCompile 'org.objenesis:objenesis:1.2' testCompile 'org.bouncycastle:bcpkix-jdk15on:1.52' + testCompile "org.scalatest:scalatest_$baseScalaVersion:2.2.5" testCompile project(':clients') testCompile project(':clients').sourceSets.test.output - if (scalaVersion.startsWith('2.9')) - testCompile "org.scalatest:scalatest_$scalaVersion:1.9.1" - else - testCompile "org.scalatest:scalatest_$baseScalaVersion:2.2.5" testRuntime "$slf4jlog4j" diff --git a/checkstyle/import-control.xml b/checkstyle/import-control.xml index 0a70de2..96a551b 100644 --- a/checkstyle/import-control.xml +++ b/checkstyle/import-control.xml @@ -56,11 +56,13 @@ + - - + + + diff --git a/clients/src/main/java/org/apache/kafka/clients/ClientUtils.java b/clients/src/main/java/org/apache/kafka/clients/ClientUtils.java index c02044e..ba3bcbe 100644 --- a/clients/src/main/java/org/apache/kafka/clients/ClientUtils.java +++ b/clients/src/main/java/org/apache/kafka/clients/ClientUtils.java @@ -22,8 +22,8 @@ import 
java.util.concurrent.atomic.AtomicReference; import org.apache.kafka.common.protocol.SecurityProtocol; import org.apache.kafka.common.network.ChannelBuilder; import org.apache.kafka.common.network.SSLChannelBuilder; -import org.apache.kafka.common.network.PlainTextChannelBuilder; -import org.apache.kafka.common.network.SSLFactory; +import org.apache.kafka.common.network.PlaintextChannelBuilder; +import org.apache.kafka.common.security.ssl.SSLFactory; import org.apache.kafka.common.config.ConfigException; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -74,15 +74,19 @@ public class ClientUtils { * returns ChannelBuilder configured channelBuilder based on the configs. */ public static ChannelBuilder createChannelBuilder(Map configs) { - ChannelBuilder channelBuilder = null; SecurityProtocol securityProtocol = SecurityProtocol.valueOf((String) configs.get(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG)); + ChannelBuilder channelBuilder = null; - if (securityProtocol == SecurityProtocol.SSL) - channelBuilder = new SSLChannelBuilder(SSLFactory.Mode.CLIENT); - else if (securityProtocol == SecurityProtocol.PLAINTEXT) - channelBuilder = new PlainTextChannelBuilder(); - else - throw new ConfigException("Invalid SecurityProtocol " + CommonClientConfigs.SECURITY_PROTOCOL_CONFIG); + switch (securityProtocol) { + case SSL: + channelBuilder = new SSLChannelBuilder(SSLFactory.Mode.CLIENT); + break; + case PLAINTEXT: + channelBuilder = new PlaintextChannelBuilder(); + break; + default: + throw new ConfigException("Invalid SecurityProtocol " + CommonClientConfigs.SECURITY_PROTOCOL_CONFIG); + } channelBuilder.configure(configs); return channelBuilder; diff --git a/clients/src/main/java/org/apache/kafka/clients/NetworkClient.java b/clients/src/main/java/org/apache/kafka/clients/NetworkClient.java index 0a472d4..85a0611 100644 --- a/clients/src/main/java/org/apache/kafka/clients/NetworkClient.java +++ b/clients/src/main/java/org/apache/kafka/clients/NetworkClient.java @@ -161,7 +161,7 @@ public class NetworkClient implements KafkaClient { return false; else // otherwise we are ready if we are connected and can send more requests - return isSendable(nodeId); + return canSendRequest(nodeId); } /** @@ -169,7 +169,7 @@ public class NetworkClient implements KafkaClient { * * @param node The node */ - private boolean isSendable(String node) { + private boolean canSendRequest(String node) { return connectionStates.isConnected(node) && selector.isChannelReady(node) && inFlightRequests.canSendMore(node); } @@ -191,7 +191,7 @@ public class NetworkClient implements KafkaClient { @Override public void send(ClientRequest request) { String nodeId = request.request().destination(); - if (!isSendable(nodeId)) + if (!canSendRequest(nodeId)) throw new IllegalStateException("Attempt to send a request to node " + nodeId + " which is not ready."); this.inFlightRequests.add(request); @@ -472,8 +472,7 @@ public class NetworkClient implements KafkaClient { } String nodeConnectionId = node.idString(); - - if (isSendable(nodeConnectionId)) { + if (canSendRequest(nodeConnectionId)) { Set topics = metadata.topics(); this.metadataFetchInProgress = true; ClientRequest metadataRequest = metadataRequest(now, nodeConnectionId, topics); diff --git a/clients/src/main/java/org/apache/kafka/common/config/AbstractConfig.java b/clients/src/main/java/org/apache/kafka/common/config/AbstractConfig.java index 55fb047..d25c86e 100644 --- a/clients/src/main/java/org/apache/kafka/common/config/AbstractConfig.java +++ 
b/clients/src/main/java/org/apache/kafka/common/config/AbstractConfig.java @@ -103,9 +103,7 @@ public class AbstractConfig { } public Map values() { - Map copy = new HashMap(); - copy.putAll(values); - return values; + return new HashMap(values); } private void logAll() { diff --git a/clients/src/main/java/org/apache/kafka/common/network/ChannelBuilder.java b/clients/src/main/java/org/apache/kafka/common/network/ChannelBuilder.java index 2629392..52a7aab 100644 --- a/clients/src/main/java/org/apache/kafka/common/network/ChannelBuilder.java +++ b/clients/src/main/java/org/apache/kafka/common/network/ChannelBuilder.java @@ -25,7 +25,7 @@ public interface ChannelBuilder { /** * Configure this class with the given key-value pairs */ - public void configure(Map configs) throws KafkaException; + void configure(Map configs) throws KafkaException; /** @@ -33,12 +33,12 @@ public interface ChannelBuilder { * @param id channel id * @param key SelectionKey */ - public KafkaChannel buildChannel(String id, SelectionKey key, int maxReceiveSize) throws KafkaException; + KafkaChannel buildChannel(String id, SelectionKey key, int maxReceiveSize) throws KafkaException; /** * Closes ChannelBuilder */ - public void close(); + void close(); } diff --git a/clients/src/main/java/org/apache/kafka/common/network/KafkaChannel.java b/clients/src/main/java/org/apache/kafka/common/network/KafkaChannel.java index d8de9f2..28a4f41 100644 --- a/clients/src/main/java/org/apache/kafka/common/network/KafkaChannel.java +++ b/clients/src/main/java/org/apache/kafka/common/network/KafkaChannel.java @@ -32,7 +32,7 @@ import org.slf4j.LoggerFactory; public class KafkaChannel { private static final Logger log = LoggerFactory.getLogger(KafkaChannel.class); private final String id; - public TransportLayer transportLayer; + private TransportLayer transportLayer; private Authenticator authenticator; private NetworkReceive receive; private Send send; diff --git a/clients/src/main/java/org/apache/kafka/common/network/PlainTextChannelBuilder.java b/clients/src/main/java/org/apache/kafka/common/network/PlainTextChannelBuilder.java deleted file mode 100644 index a8e1bb0..0000000 --- a/clients/src/main/java/org/apache/kafka/common/network/PlainTextChannelBuilder.java +++ /dev/null @@ -1,58 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE - * file distributed with this work for additional information regarding copyright ownership. The ASF licenses this file - * to You under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the - * License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on - * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the - * specific language governing permissions and limitations under the License. 
- */ -package org.apache.kafka.common.network; - -import java.nio.channels.SelectionKey; -import java.util.Map; - -import org.apache.kafka.common.security.auth.PrincipalBuilder; -import org.apache.kafka.common.config.SSLConfigs; -import org.apache.kafka.common.utils.Utils; -import org.apache.kafka.common.KafkaException; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - - -public class PlainTextChannelBuilder implements ChannelBuilder { - private static final Logger log = LoggerFactory.getLogger(PlainTextChannelBuilder.class); - private PrincipalBuilder principalBuilder; - - public void configure(Map configs) throws KafkaException { - try { - this.principalBuilder = (PrincipalBuilder) Utils.newInstance((Class) configs.get(SSLConfigs.PRINCIPAL_BUILDER_CLASS_CONFIG)); - this.principalBuilder.configure(configs); - } catch (Exception e) { - throw new KafkaException(e); - } - } - - public KafkaChannel buildChannel(String id, SelectionKey key, int maxReceiveSize) throws KafkaException { - KafkaChannel channel = null; - try { - PlainTextTransportLayer transportLayer = new PlainTextTransportLayer(key); - Authenticator authenticator = new DefaultAuthenticator(); - authenticator.configure(transportLayer, this.principalBuilder); - channel = new KafkaChannel(id, transportLayer, authenticator, maxReceiveSize); - } catch (Exception e) { - log.warn("Failed to create channel due to ", e); - throw new KafkaException(e); - } - return channel; - } - - public void close() { - this.principalBuilder.close(); - } - -} diff --git a/clients/src/main/java/org/apache/kafka/common/network/PlainTextTransportLayer.java b/clients/src/main/java/org/apache/kafka/common/network/PlainTextTransportLayer.java deleted file mode 100644 index c445839..0000000 --- a/clients/src/main/java/org/apache/kafka/common/network/PlainTextTransportLayer.java +++ /dev/null @@ -1,217 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.kafka.common.network; - -/* - * Transport layer for PLAINTEXT communication - */ - -import java.io.IOException; -import java.nio.ByteBuffer; -import java.nio.channels.SocketChannel; -import java.nio.channels.SelectionKey; - -import java.security.Principal; - -import org.apache.kafka.common.security.auth.KafkaPrincipal; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -public class PlainTextTransportLayer implements TransportLayer { - private static final Logger log = LoggerFactory.getLogger(PlainTextTransportLayer.class); - private SelectionKey key; - private SocketChannel socketChannel; - private final Principal principal = new KafkaPrincipal("ANONYMOUS"); - - public PlainTextTransportLayer(SelectionKey key) throws IOException { - this.key = key; - this.socketChannel = (SocketChannel) key.channel(); - } - - @Override - public boolean ready() { - return true; - } - - @Override - public void finishConnect() throws IOException { - socketChannel.finishConnect(); - int ops = key.interestOps(); - ops &= ~SelectionKey.OP_CONNECT; - ops |= SelectionKey.OP_READ; - key.interestOps(ops); - } - - @Override - public void disconnect() { - key.cancel(); - } - - @Override - public SocketChannel socketChannel() { - return socketChannel; - } - - @Override - public boolean isOpen() { - return socketChannel.isOpen(); - } - - @Override - public boolean isConnected() { - return socketChannel.isConnected(); - } - - /** - * Closes this channel - * - * @throws IOException If and I/O error occurs - */ - @Override - public void close() throws IOException { - socketChannel.socket().close(); - socketChannel.close(); - key.attach(null); - key.cancel(); - } - - /** - * Performs SSL handshake hence is a no-op for the non-secure - * implementation - * @throws IOException - */ - @Override - public void handshake() throws IOException {} - - /** - * Reads a sequence of bytes from this channel into the given buffer. - * - * @param dst The buffer into which bytes are to be transferred - * @return The number of bytes read, possible zero or -1 if the channel has reached end-of-stream - * @throws IOException if some other I/O error occurs - */ - @Override - public int read(ByteBuffer dst) throws IOException { - return socketChannel.read(dst); - } - - /** - * Reads a sequence of bytes from this channel into the given buffers. - * - * @param dsts - The buffers into which bytes are to be transferred. - * @return The number of bytes read, possibly zero, or -1 if the channel has reached end-of-stream. - * @throws IOException if some other I/O error occurs - */ - @Override - public long read(ByteBuffer[] dsts) throws IOException { - return socketChannel.read(dsts); - } - - /** - * Reads a sequence of bytes from this channel into a subsequence of the given buffers. - * @param dsts - The buffers into which bytes are to be transferred - * @param offset - The offset within the buffer array of the first buffer into which bytes are to be transferred; must be non-negative and no larger than dsts.length. - * @param length - The maximum number of buffers to be accessed; must be non-negative and no larger than dsts.length - offset - * @returns The number of bytes read, possibly zero, or -1 if the channel has reached end-of-stream. 
- * @throws IOException if some other I/O error occurs - */ - @Override - public long read(ByteBuffer[] dsts, int offset, int length) throws IOException { - return socketChannel.read(dsts, offset, length); - } - - /** - * Writes a sequence of bytes to this channel from the given buffer. - * - * @param src The buffer from which bytes are to be retrieved - * @returns The number of bytes read, possibly zero, or -1 if the channel has reached end-of-stream - * @throws IOException If some other I/O error occurs - */ - @Override - public int write(ByteBuffer src) throws IOException { - return socketChannel.write(src); - } - - /** - * Writes a sequence of bytes to this channel from the given buffer. - * - * @param srcs The buffer from which bytes are to be retrieved - * @returns The number of bytes read, possibly zero, or -1 if the channel has reached end-of-stream - * @throws IOException If some other I/O error occurs - */ - @Override - public long write(ByteBuffer[] srcs) throws IOException { - return socketChannel.write(srcs); - } - - /** - * Writes a sequence of bytes to this channel from the subsequence of the given buffers. - * - * @param srcs The buffers from which bytes are to be retrieved - * @param offset The offset within the buffer array of the first buffer from which bytes are to be retrieved; must be non-negative and no larger than srcs.length. - * @param length - The maximum number of buffers to be accessed; must be non-negative and no larger than srcs.length - offset. - * @return returns no.of bytes written , possibly zero. - * @throws IOException If some other I/O error occurs - */ - @Override - public long write(ByteBuffer[] srcs, int offset, int length) throws IOException { - return socketChannel.write(srcs, offset, length); - } - - /** - * always returns false as there will be not be any - * pending writes since we directly write to socketChannel. - */ - @Override - public boolean hasPendingWrites() { - return false; - } - - /** - * Returns ANONYMOUS as Principal. - */ - @Override - public Principal peerPrincipal() throws IOException { - return principal; - } - - /** - * Adds the interestOps to selectionKey. - * @param interestOps - */ - @Override - public void addInterestOps(int ops) { - key.interestOps(key.interestOps() | ops); - - } - - /** - * Removes the interestOps from selectionKey. - * @param interestOps - */ - @Override - public void removeInterestOps(int ops) { - key.interestOps(key.interestOps() & ~ops); - } - - @Override - public boolean isMute() { - return key.isValid() && (key.interestOps() & SelectionKey.OP_READ) == 0; - } -} diff --git a/clients/src/main/java/org/apache/kafka/common/network/PlaintextChannelBuilder.java b/clients/src/main/java/org/apache/kafka/common/network/PlaintextChannelBuilder.java new file mode 100644 index 0000000..76dbf93 --- /dev/null +++ b/clients/src/main/java/org/apache/kafka/common/network/PlaintextChannelBuilder.java @@ -0,0 +1,58 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE + * file distributed with this work for additional information regarding copyright ownership. The ASF licenses this file + * to You under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the + * License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on + * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + */ +package org.apache.kafka.common.network; + +import java.nio.channels.SelectionKey; +import java.util.Map; + +import org.apache.kafka.common.security.auth.PrincipalBuilder; +import org.apache.kafka.common.config.SSLConfigs; +import org.apache.kafka.common.utils.Utils; +import org.apache.kafka.common.KafkaException; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + + +public class PlaintextChannelBuilder implements ChannelBuilder { + private static final Logger log = LoggerFactory.getLogger(PlaintextChannelBuilder.class); + private PrincipalBuilder principalBuilder; + + public void configure(Map configs) throws KafkaException { + try { + this.principalBuilder = (PrincipalBuilder) Utils.newInstance((Class) configs.get(SSLConfigs.PRINCIPAL_BUILDER_CLASS_CONFIG)); + this.principalBuilder.configure(configs); + } catch (Exception e) { + throw new KafkaException(e); + } + } + + public KafkaChannel buildChannel(String id, SelectionKey key, int maxReceiveSize) throws KafkaException { + KafkaChannel channel = null; + try { + PlaintextTransportLayer transportLayer = new PlaintextTransportLayer(key); + Authenticator authenticator = new DefaultAuthenticator(); + authenticator.configure(transportLayer, this.principalBuilder); + channel = new KafkaChannel(id, transportLayer, authenticator, maxReceiveSize); + } catch (Exception e) { + log.warn("Failed to create channel due to ", e); + throw new KafkaException(e); + } + return channel; + } + + public void close() { + this.principalBuilder.close(); + } + +} diff --git a/clients/src/main/java/org/apache/kafka/common/network/PlaintextTransportLayer.java b/clients/src/main/java/org/apache/kafka/common/network/PlaintextTransportLayer.java new file mode 100644 index 0000000..a3567af --- /dev/null +++ b/clients/src/main/java/org/apache/kafka/common/network/PlaintextTransportLayer.java @@ -0,0 +1,217 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.kafka.common.network; + +/* + * Transport layer for PLAINTEXT communication + */ + +import java.io.IOException; +import java.nio.ByteBuffer; +import java.nio.channels.SocketChannel; +import java.nio.channels.SelectionKey; + +import java.security.Principal; + +import org.apache.kafka.common.security.auth.KafkaPrincipal; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +public class PlaintextTransportLayer implements TransportLayer { + private static final Logger log = LoggerFactory.getLogger(PlaintextTransportLayer.class); + private final SelectionKey key; + private final SocketChannel socketChannel; + private final Principal principal = new KafkaPrincipal("ANONYMOUS"); + + public PlaintextTransportLayer(SelectionKey key) throws IOException { + this.key = key; + this.socketChannel = (SocketChannel) key.channel(); + } + + @Override + public boolean ready() { + return true; + } + + @Override + public void finishConnect() throws IOException { + socketChannel.finishConnect(); + int ops = key.interestOps(); + ops &= ~SelectionKey.OP_CONNECT; + ops |= SelectionKey.OP_READ; + key.interestOps(ops); + } + + @Override + public void disconnect() { + key.cancel(); + } + + @Override + public SocketChannel socketChannel() { + return socketChannel; + } + + @Override + public boolean isOpen() { + return socketChannel.isOpen(); + } + + @Override + public boolean isConnected() { + return socketChannel.isConnected(); + } + + /** + * Closes this channel + * + * @throws IOException If I/O error occurs + */ + @Override + public void close() throws IOException { + socketChannel.socket().close(); + socketChannel.close(); + key.attach(null); + key.cancel(); + } + + /** + * Performs SSL handshake hence is a no-op for the non-secure + * implementation + * @throws IOException + */ + @Override + public void handshake() throws IOException {} + + /** + * Reads a sequence of bytes from this channel into the given buffer. + * + * @param dst The buffer into which bytes are to be transferred + * @return The number of bytes read, possible zero or -1 if the channel has reached end-of-stream + * @throws IOException if some other I/O error occurs + */ + @Override + public int read(ByteBuffer dst) throws IOException { + return socketChannel.read(dst); + } + + /** + * Reads a sequence of bytes from this channel into the given buffers. + * + * @param dsts - The buffers into which bytes are to be transferred. + * @return The number of bytes read, possibly zero, or -1 if the channel has reached end-of-stream. + * @throws IOException if some other I/O error occurs + */ + @Override + public long read(ByteBuffer[] dsts) throws IOException { + return socketChannel.read(dsts); + } + + /** + * Reads a sequence of bytes from this channel into a subsequence of the given buffers. + * @param dsts - The buffers into which bytes are to be transferred + * @param offset - The offset within the buffer array of the first buffer into which bytes are to be transferred; must be non-negative and no larger than dsts.length. + * @param length - The maximum number of buffers to be accessed; must be non-negative and no larger than dsts.length - offset + * @returns The number of bytes read, possibly zero, or -1 if the channel has reached end-of-stream. 
+ * @throws IOException if some other I/O error occurs + */ + @Override + public long read(ByteBuffer[] dsts, int offset, int length) throws IOException { + return socketChannel.read(dsts, offset, length); + } + + /** + * Writes a sequence of bytes to this channel from the given buffer. + * + * @param src The buffer from which bytes are to be retrieved + * @return The number of bytes written, possibly zero + * @throws IOException If some other I/O error occurs + */ + @Override + public int write(ByteBuffer src) throws IOException { + return socketChannel.write(src); + } + + /** + * Writes a sequence of bytes to this channel from the given buffers. + * + * @param srcs The buffers from which bytes are to be retrieved + * @return The number of bytes written, possibly zero + * @throws IOException If some other I/O error occurs + */ + @Override + public long write(ByteBuffer[] srcs) throws IOException { + return socketChannel.write(srcs); + } + + /** + * Writes a sequence of bytes to this channel from the subsequence of the given buffers. + * + * @param srcs The buffers from which bytes are to be retrieved + * @param offset The offset within the buffer array of the first buffer from which bytes are to be retrieved; must be non-negative and no larger than srcs.length. + * @param length - The maximum number of buffers to be accessed; must be non-negative and no larger than srcs.length - offset. + * @return The number of bytes written, possibly zero. + * @throws IOException If some other I/O error occurs + */ + @Override + public long write(ByteBuffer[] srcs, int offset, int length) throws IOException { + return socketChannel.write(srcs, offset, length); + } + + /** + * Always returns false as there will not be any + * pending writes since we write directly to the socketChannel. + */ + @Override + public boolean hasPendingWrites() { + return false; + } + + /** + * Returns ANONYMOUS as the Principal. + */ + @Override + public Principal peerPrincipal() throws IOException { + return principal; + } + + /** + * Adds the interestOps to selectionKey. + * @param interestOps + */ + @Override + public void addInterestOps(int ops) { + key.interestOps(key.interestOps() | ops); + + } + + /** + * Removes the interestOps from selectionKey.
+ * @param interestOps + */ + @Override + public void removeInterestOps(int ops) { + key.interestOps(key.interestOps() & ~ops); + } + + @Override + public boolean isMute() { + return key.isValid() && (key.interestOps() & SelectionKey.OP_READ) == 0; + } +} diff --git a/clients/src/main/java/org/apache/kafka/common/network/SSLChannelBuilder.java b/clients/src/main/java/org/apache/kafka/common/network/SSLChannelBuilder.java index 2e0525c..88c218b 100644 --- a/clients/src/main/java/org/apache/kafka/common/network/SSLChannelBuilder.java +++ b/clients/src/main/java/org/apache/kafka/common/network/SSLChannelBuilder.java @@ -17,6 +17,7 @@ import java.nio.channels.SocketChannel; import java.util.Map; import org.apache.kafka.common.security.auth.PrincipalBuilder; +import org.apache.kafka.common.security.ssl.SSLFactory; import org.apache.kafka.common.config.SSLConfigs; import org.apache.kafka.common.utils.Utils; import org.apache.kafka.common.KafkaException; diff --git a/clients/src/main/java/org/apache/kafka/common/network/SSLFactory.java b/clients/src/main/java/org/apache/kafka/common/network/SSLFactory.java deleted file mode 100644 index 7cc0c3b..0000000 --- a/clients/src/main/java/org/apache/kafka/common/network/SSLFactory.java +++ /dev/null @@ -1,210 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.kafka.common.network; - -import java.util.Map; -import java.util.List; -import java.io.FileInputStream; -import java.io.IOException; -import java.security.GeneralSecurityException; -import java.security.KeyStore; - -import javax.net.ssl.*; - -import org.apache.kafka.common.KafkaException; -import org.apache.kafka.common.Configurable; -import org.apache.kafka.common.config.SSLConfigs; - - -public class SSLFactory implements Configurable { - - public enum Mode { CLIENT, SERVER }; - private String protocol; - private String provider; - private String kmfAlgorithm; - private String tmfAlgorithm; - private SecurityStore keystore = null; - private String keyPassword; - private SecurityStore truststore; - private String[] cipherSuites; - private String[] enabledProtocols; - private String endpointIdentification; - private SSLContext sslContext; - private boolean needClientAuth; - private boolean wantClientAuth; - private Mode mode; - - - public SSLFactory(Mode mode) { - this.mode = mode; - } - - @Override - public void configure(Map configs) throws KafkaException { - this.protocol = (String) configs.get(SSLConfigs.SSL_PROTOCOL_CONFIG); - this.provider = (String) configs.get(SSLConfigs.SSL_PROVIDER_CONFIG); - - if (configs.get(SSLConfigs.SSL_CIPHER_SUITES_CONFIG) != null) { - List cipherSuitesList = (List) configs.get(SSLConfigs.SSL_CIPHER_SUITES_CONFIG); - this.cipherSuites = (String[]) cipherSuitesList.toArray(new String[cipherSuitesList.size()]); - } - - if (configs.get(SSLConfigs.SSL_ENABLED_PROTOCOLS_CONFIG) != null) { - List enabledProtocolsList = (List) configs.get(SSLConfigs.SSL_ENABLED_PROTOCOLS_CONFIG); - this.enabledProtocols = (String[]) enabledProtocolsList.toArray(new String[enabledProtocolsList.size()]); - } - - if (configs.containsKey(SSLConfigs.SSL_ENDPOINT_IDENTIFICATION_ALGORITHM_CONFIG)) { - this.endpointIdentification = (String) configs.get(SSLConfigs.SSL_ENDPOINT_IDENTIFICATION_ALGORITHM_CONFIG); - } - - if (configs.containsKey(SSLConfigs.SSL_CLIENT_AUTH_CONFIG)) { - String clientAuthConfig = (String) configs.get(SSLConfigs.SSL_CLIENT_AUTH_CONFIG); - if (clientAuthConfig.equals("required")) - this.needClientAuth = true; - else if (clientAuthConfig.equals("requested")) - this.wantClientAuth = true; - } - - this.kmfAlgorithm = (String) configs.get(SSLConfigs.SSL_KEYMANAGER_ALGORITHM_CONFIG); - this.tmfAlgorithm = (String) configs.get(SSLConfigs.SSL_TRUSTMANAGER_ALGORITHM_CONFIG); - - if (checkKeyStoreConfigs(configs)) { - createKeystore((String) configs.get(SSLConfigs.SSL_KEYSTORE_TYPE_CONFIG), - (String) configs.get(SSLConfigs.SSL_KEYSTORE_LOCATION_CONFIG), - (String) configs.get(SSLConfigs.SSL_KEYSTORE_PASSWORD_CONFIG), - (String) configs.get(SSLConfigs.SSL_KEY_PASSWORD_CONFIG)); - } - - createTruststore((String) configs.get(SSLConfigs.SSL_TRUSTSTORE_TYPE_CONFIG), - (String) configs.get(SSLConfigs.SSL_TRUSTSTORE_LOCATION_CONFIG), - (String) configs.get(SSLConfigs.SSL_TRUSTSTORE_PASSWORD_CONFIG)); - try { - this.sslContext = createSSLContext(); - } catch (Exception e) { - throw new KafkaException(e); - } - } - - - private SSLContext createSSLContext() throws GeneralSecurityException, IOException { - SSLContext sslContext; - if (provider != null) - sslContext = SSLContext.getInstance(protocol, provider); - else - sslContext = SSLContext.getInstance(protocol); - - KeyManager[] keyManagers = null; - if (keystore != null) { - String kmfAlgorithm = this.kmfAlgorithm != null ? 
this.kmfAlgorithm : KeyManagerFactory.getDefaultAlgorithm(); - KeyManagerFactory kmf = KeyManagerFactory.getInstance(kmfAlgorithm); - KeyStore ks = keystore.load(); - String keyPassword = this.keyPassword != null ? this.keyPassword : keystore.password; - kmf.init(ks, keyPassword.toCharArray()); - keyManagers = kmf.getKeyManagers(); - } - - String tmfAlgorithm = this.tmfAlgorithm != null ? this.tmfAlgorithm : TrustManagerFactory.getDefaultAlgorithm(); - TrustManagerFactory tmf = TrustManagerFactory.getInstance(tmfAlgorithm); - KeyStore ts = truststore == null ? null : truststore.load(); - tmf.init(ts); - - sslContext.init(keyManagers, tmf.getTrustManagers(), null); - return sslContext; - } - - public SSLEngine createSSLEngine(String peerHost, int peerPort) { - SSLEngine sslEngine = sslContext.createSSLEngine(peerHost, peerPort); - if (cipherSuites != null) sslEngine.setEnabledCipherSuites(cipherSuites); - if (enabledProtocols != null) sslEngine.setEnabledProtocols(enabledProtocols); - - if (mode == Mode.SERVER) { - sslEngine.setUseClientMode(false); - if (needClientAuth) - sslEngine.setNeedClientAuth(needClientAuth); - else - sslEngine.setWantClientAuth(wantClientAuth); - } else { - sslEngine.setUseClientMode(true); - SSLParameters sslParams = sslEngine.getSSLParameters(); - sslParams.setEndpointIdentificationAlgorithm(endpointIdentification); - sslEngine.setSSLParameters(sslParams); - } - return sslEngine; - } - - /** - * Returns a configured SSLContext. - * @return SSLContext. - */ - public SSLContext sslContext() { - return sslContext; - } - - private void createKeystore(String type, String path, String password, String keyPassword) { - if (path == null && password != null) { - throw new KafkaException("SSL key store password is not specified."); - } else if (path != null && password == null) { - throw new KafkaException("SSL key store is not specified, but key store password is specified."); - } else if (path != null && password != null) { - this.keystore = new SecurityStore(type, path, password); - this.keyPassword = keyPassword; - } - } - - private void createTruststore(String type, String path, String password) { - if (path == null && password != null) { - throw new KafkaException("SSL key store password is not specified."); - } else if (path != null && password == null) { - throw new KafkaException("SSL key store is not specified, but key store password is specified."); - } else if (path != null && password != null) { - this.truststore = new SecurityStore(type, path, password); - } - } - - private boolean checkKeyStoreConfigs(Map configs) { - return configs.containsKey(SSLConfigs.SSL_KEYSTORE_TYPE_CONFIG) && - configs.containsKey(SSLConfigs.SSL_KEYSTORE_LOCATION_CONFIG) && - configs.containsKey(SSLConfigs.SSL_KEYSTORE_PASSWORD_CONFIG) && - configs.containsKey(SSLConfigs.SSL_KEY_PASSWORD_CONFIG); - } - - private class SecurityStore { - private final String type; - private final String path; - private final String password; - - private SecurityStore(String type, String path, String password) { - this.type = type == null ? 
KeyStore.getDefaultType() : type; - this.path = path; - this.password = password; - } - - private KeyStore load() throws GeneralSecurityException, IOException { - FileInputStream in = null; - try { - KeyStore ks = KeyStore.getInstance(type); - in = new FileInputStream(path); - ks.load(in, password.toCharArray()); - return ks; - } finally { - if (in != null) in.close(); - } - } - } - -} diff --git a/clients/src/main/java/org/apache/kafka/common/network/SSLTransportLayer.java b/clients/src/main/java/org/apache/kafka/common/network/SSLTransportLayer.java index 5306051..2f29dac 100644 --- a/clients/src/main/java/org/apache/kafka/common/network/SSLTransportLayer.java +++ b/clients/src/main/java/org/apache/kafka/common/network/SSLTransportLayer.java @@ -43,10 +43,10 @@ import org.slf4j.LoggerFactory; public class SSLTransportLayer implements TransportLayer { private static final Logger log = LoggerFactory.getLogger(SSLTransportLayer.class); - private String channelId; - protected SSLEngine sslEngine; - private SelectionKey key; - private SocketChannel socketChannel; + private final String channelId; + protected final SSLEngine sslEngine; + private final SelectionKey key; + private final SocketChannel socketChannel; private HandshakeStatus handshakeStatus; private SSLEngineResult handshakeResult; private boolean handshakeComplete = false; @@ -61,9 +61,9 @@ public class SSLTransportLayer implements TransportLayer { this.key = key; this.socketChannel = (SocketChannel) key.channel(); this.sslEngine = sslEngine; - this.netReadBuffer = ByteBuffer.allocateDirect(packetBufferSize()); - this.netWriteBuffer = ByteBuffer.allocateDirect(packetBufferSize()); - this.appReadBuffer = ByteBuffer.allocateDirect(applicationBufferSize()); + this.netReadBuffer = ByteBuffer.allocate(packetBufferSize()); + this.netWriteBuffer = ByteBuffer.allocate(packetBufferSize()); + this.appReadBuffer = ByteBuffer.allocate(applicationBufferSize()); startHandshake(); } @@ -145,11 +145,11 @@ public class SSLTransportLayer implements TransportLayer { } netWriteBuffer.flip(); flush(netWriteBuffer); + socketChannel.socket().close(); + socketChannel.close(); } catch (IOException ie) { log.warn("Failed to send SSL Close message ", ie); } - socketChannel.socket().close(); - socketChannel.close(); key.attach(null); key.cancel(); } @@ -159,7 +159,7 @@ public class SSLTransportLayer implements TransportLayer { */ @Override public boolean hasPendingWrites() { - return netWriteBuffer.remaining() != 0; + return netWriteBuffer.hasRemaining(); } /** @@ -212,11 +212,13 @@ public class SSLTransportLayer implements TransportLayer { try { switch (handshakeStatus) { case NEED_TASK: - log.trace("SSLHandshake NEED_TASK", channelId); + log.trace("SSLHandshake NEED_TASK channelId {}, appReadBuffer pos {}, netReadBuffer pos {}, netWriteBuffer pos {}", + channelId, appReadBuffer.position(), netReadBuffer.position(), netWriteBuffer.position()); handshakeStatus = runDelegatedTasks(); break; case NEED_WRAP: - log.trace("SSLHandshake NEED_WRAP", channelId); + log.trace("SSLHandshake NEED_WRAP channelId {}, appReadBuffer pos {}, netReadBuffer pos {}, netWriteBuffer pos {}", + channelId, appReadBuffer.position(), netReadBuffer.position(), netWriteBuffer.position()); handshakeResult = handshakeWrap(write); if (handshakeResult.getStatus() == Status.BUFFER_OVERFLOW) { int currentPacketBufferSize = packetBufferSize(); @@ -230,15 +232,17 @@ public class SSLTransportLayer implements TransportLayer { } else if (handshakeResult.getStatus() == Status.CLOSED) { throw new 
EOFException(); } - log.trace("SSLHandshake NEED_WRAP handshakeStatus ", channelId, handshakeResult); + log.trace("SSLHandshake NEED_WRAP channelId {}, handshakeResult {}, appReadBuffer pos {}, netReadBuffer pos {}, netWriteBuffer pos {}", + channelId, handshakeResult, appReadBuffer.position(), netReadBuffer.position(), netWriteBuffer.position()); //if handshake status is not NEED_UNWRAP or unable to flush netWriteBuffer contents //we will break here otherwise we can do need_unwrap in the same call. - if (handshakeStatus != HandshakeStatus.NEED_UNWRAP || (!write && !flush(netWriteBuffer))) { + if (handshakeStatus != HandshakeStatus.NEED_UNWRAP || !flush(netWriteBuffer)) { key.interestOps(key.interestOps() | SelectionKey.OP_WRITE); break; } case NEED_UNWRAP: - log.trace("SSLHandshake NEED_UNWRAP", channelId); + log.trace("SSLHandshake NEED_UNWRAP {}, appReadBuffer pos {}, netReadBuffer pos {}, netWriteBuffer pos {}", + channelId, appReadBuffer.position(), netReadBuffer.position(), netWriteBuffer.position()); handshakeResult = handshakeUnwrap(read); if (handshakeResult.getStatus() == Status.BUFFER_UNDERFLOW) { int currentPacketBufferSize = packetBufferSize(); @@ -256,6 +260,9 @@ public class SSLTransportLayer implements TransportLayer { } else if (handshakeResult.getStatus() == Status.CLOSED) { throw new EOFException("SSL handshake status CLOSED during handshake UNWRAP"); } + log.trace("SSLHandshake NEED_UNWRAP channelId {}, handshakeResult {}, appReadBuffer pos {}, netReadBuffer pos {}, netWriteBuffer pos {}", + channelId, handshakeResult, appReadBuffer.position(), netReadBuffer.position(), netWriteBuffer.position()); + //if handshakeStatus completed than fall-through to finished status. //after handshake is finished there is no data left to read/write in socketChannel. //so the selector won't invoke this channel if we don't go through the handshakeFinished here. @@ -307,7 +314,6 @@ public class SSLTransportLayer implements TransportLayer { // It can move from FINISHED status to NOT_HANDSHAKING after the handshake is completed. 
// Hence we also need to check handshakeResult.getHandshakeStatus() if the handshake finished or not if (handshakeResult.getHandshakeStatus() == HandshakeStatus.FINISHED) { - log.trace("SSLHandshake FINISHED", channelId); //we are complete if we have delivered the last package handshakeComplete = !netWriteBuffer.hasRemaining(); //remove OP_WRITE if we are complete, otherwise we still have data to write @@ -315,6 +321,9 @@ public class SSLTransportLayer implements TransportLayer { key.interestOps(key.interestOps() | SelectionKey.OP_WRITE); else key.interestOps(key.interestOps() & ~SelectionKey.OP_WRITE); + + log.trace("SSLHandshake FINISHED {}, appReadBuffer pos {}, netReadBuffer pos {}, netWriteBuffer pos {} ", + channelId, appReadBuffer.position(), netReadBuffer.position(), netWriteBuffer.position()); } else { throw new IOException("NOT_HANDSHAKING during handshake"); } @@ -341,7 +350,7 @@ public class SSLTransportLayer implements TransportLayer { result.getHandshakeStatus() == HandshakeStatus.NEED_TASK) { handshakeStatus = runDelegatedTasks(); } - //optimization, if we do have a writable channel, write it now + if (doWrite) flush(netWriteBuffer); return result; } @@ -354,10 +363,6 @@ public class SSLTransportLayer implements TransportLayer { */ private SSLEngineResult handshakeUnwrap(Boolean doRead) throws IOException { log.trace("SSLHandshake handshakeUnwrap", channelId); - if (netReadBuffer.position() == netReadBuffer.limit()) { - //clear the buffer if we have emptied it out on data - netReadBuffer.clear(); - } SSLEngineResult result; boolean cont = false; int read = 0; @@ -372,7 +377,7 @@ public class SSLTransportLayer implements TransportLayer { netReadBuffer.compact(); handshakeStatus = result.getHandshakeStatus(); if (result.getStatus() == SSLEngineResult.Status.OK && - result.getHandshakeStatus() == HandshakeStatus.NEED_TASK) { + result.getHandshakeStatus() == HandshakeStatus.NEED_TASK) { handshakeStatus = runDelegatedTasks(); } cont = result.getStatus() == SSLEngineResult.Status.OK && @@ -415,6 +420,8 @@ public class SSLTransportLayer implements TransportLayer { netReadBuffer.compact(); // handle ssl renegotiation. if (unwrapResult.getHandshakeStatus() != HandshakeStatus.NOT_HANDSHAKING) { + log.trace("SSLChannel Read begin renegotiation channelId {}, appReadBuffer pos {}, netReadBuffer pos {}, netWriteBuffer pos {}", + channelId, appReadBuffer.position(), netReadBuffer.position(), netWriteBuffer.position()); handshake(); break; } @@ -428,10 +435,7 @@ public class SSLTransportLayer implements TransportLayer { throw new IllegalStateException("Buffer overflow when available data size (" + appReadBuffer.position() + ") >= application buffer size (" + currentApplicationBufferSize + ")"); } - if (dst.hasRemaining()) - read += readFromAppBuffer(dst); - else - break; + break; } else if (unwrapResult.getStatus() == Status.BUFFER_UNDERFLOW) { int currentPacketBufferSize = packetBufferSize(); netReadBuffer = Utils.ensureCapacity(netReadBuffer, currentPacketBufferSize); diff --git a/clients/src/main/java/org/apache/kafka/common/network/Selector.java b/clients/src/main/java/org/apache/kafka/common/network/Selector.java index 327c5ca..90ee226 100644 --- a/clients/src/main/java/org/apache/kafka/common/network/Selector.java +++ b/clients/src/main/java/org/apache/kafka/common/network/Selector.java @@ -237,6 +237,17 @@ public class Selector implements Selectable { * lists will be cleared at the beginning of each {@link #poll(long, List)} call and repopulated by the call if any * completed I/O. 
* + * In the "Plaintext" setting, we use the socketChannel to read from and write to the network. But for the "SSL" setting, + * we encrypt the data before we use the socketChannel to write it to the network, and decrypt the data before we return the responses. + * This requires additional buffers to be maintained as we are reading from the network; since the data on the wire is encrypted, + * we won't be able to read the exact number of bytes the Kafka protocol requires. We read as many bytes as we can, up to the SSLEngine's + * application buffer size. This means we might read more bytes than the requested size. + * If there is no further data to read from the socketChannel, the selector won't invoke that channel and we'd have additional bytes + * left in the buffer. To overcome this issue we added the "stagedReceives" map which contains a per-channel deque. When we are + * reading a channel we read as many responses as we can and store them in "stagedReceives", and we pop one response during + * the poll to add to the completedReceives. If there are any active channels in "stagedReceives" we set "timeout" to 0, + * pop a response and add it to the completedReceives. + * * @param timeout The amount of time to wait, in milliseconds. If negative, wait indefinitely. * @throws IllegalStateException If a send is given for which we have no existing connection or for which there is * already an in-progress send @@ -244,7 +255,7 @@ @Override public void poll(long timeout) throws IOException { clear(); - if (this.stagedReceives.size() > 0) + if (hasStagedReceives()) timeout = 0; /* check ready keys */ long startSelect = time.nanoseconds(); @@ -278,13 +289,12 @@ channel.prepare(); /* if channel is ready read from any connections that have readable data */ - if (channel.ready() && key.isReadable()) { + if (channel.ready() && key.isReadable() && !hasStagedReceive(channel)) { NetworkReceive networkReceive; try { while ((networkReceive = channel.read()) != null) { addToStagedReceives(channel, networkReceive); } - addToCompletedReceives(channel); } catch (InvalidReceiveException e) { log.error("Invalid data received from " + channel.id() + " closing connection", e); close(channel); @@ -317,9 +327,10 @@ this.disconnected.add(channel.id()); } } - } else { - addToCompletedReceives(); } + + addToCompletedReceives(); + long endIo = time.nanoseconds(); this.sensors.ioTime.record(endIo - endSelect, time.milliseconds()); maybeCloseOldestConnection(); @@ -480,6 +491,25 @@ } /** + * Check if the given channel has a staged receive + */ + private boolean hasStagedReceive(KafkaChannel channel) { + return stagedReceives.containsKey(channel); + } + + /** + * Check if stagedReceives has an unmuted channel + */ + private boolean hasStagedReceives() { + for (KafkaChannel channel : this.stagedReceives.keySet()) { + if (!channel.isMute()) + return true; + } + return false; + } + + + /** * adds a receive to staged receieves */ private void addToStagedReceives(KafkaChannel channel, NetworkReceive receive) { @@ -499,7 +529,7 @@ while (iter.hasNext()) { Map.Entry> entry = iter.next(); KafkaChannel channel = entry.getKey(); - if (!channel.hasSend() && !channel.isMute()) { + if (!channel.isMute()) { Deque deque = entry.getValue(); NetworkReceive networkReceive = deque.poll(); this.completedReceives.add(networkReceive); @@ -511,19 +541,6 @@ public
class Selector implements Selectable { } } - /** - * checks if there are any staged receives and adds to completedReceives - */ - private void addToCompletedReceives(KafkaChannel channel) { - Deque deque = this.stagedReceives.get(channel); - if (!channel.hasSend() && deque != null) { - NetworkReceive networkReceive = deque.poll(); - this.completedReceives.add(networkReceive); - this.sensors.recordBytesReceived(channel.id(), networkReceive.payload().limit()); - if (deque.size() == 0) - this.stagedReceives.remove(channel); - } - } private class SelectorMetrics { private final Metrics metrics; diff --git a/clients/src/main/java/org/apache/kafka/common/security/ssl/SSLFactory.java b/clients/src/main/java/org/apache/kafka/common/security/ssl/SSLFactory.java new file mode 100644 index 0000000..f79b65c --- /dev/null +++ b/clients/src/main/java/org/apache/kafka/common/security/ssl/SSLFactory.java @@ -0,0 +1,210 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.kafka.common.security.ssl; + +import java.util.Map; +import java.util.List; +import java.io.FileInputStream; +import java.io.IOException; +import java.security.GeneralSecurityException; +import java.security.KeyStore; + +import javax.net.ssl.*; + +import org.apache.kafka.common.KafkaException; +import org.apache.kafka.common.Configurable; +import org.apache.kafka.common.config.SSLConfigs; + + +public class SSLFactory implements Configurable { + + public enum Mode { CLIENT, SERVER }; + private String protocol; + private String provider; + private String kmfAlgorithm; + private String tmfAlgorithm; + private SecurityStore keystore = null; + private String keyPassword; + private SecurityStore truststore; + private String[] cipherSuites; + private String[] enabledProtocols; + private String endpointIdentification; + private SSLContext sslContext; + private boolean needClientAuth; + private boolean wantClientAuth; + private final Mode mode; + + + public SSLFactory(Mode mode) { + this.mode = mode; + } + + @Override + public void configure(Map configs) throws KafkaException { + this.protocol = (String) configs.get(SSLConfigs.SSL_PROTOCOL_CONFIG); + this.provider = (String) configs.get(SSLConfigs.SSL_PROVIDER_CONFIG); + + if (configs.get(SSLConfigs.SSL_CIPHER_SUITES_CONFIG) != null) { + List cipherSuitesList = (List) configs.get(SSLConfigs.SSL_CIPHER_SUITES_CONFIG); + this.cipherSuites = (String[]) cipherSuitesList.toArray(new String[cipherSuitesList.size()]); + } + + if (configs.get(SSLConfigs.SSL_ENABLED_PROTOCOLS_CONFIG) != null) { + List enabledProtocolsList = (List) configs.get(SSLConfigs.SSL_ENABLED_PROTOCOLS_CONFIG); + this.enabledProtocols = (String[]) enabledProtocolsList.toArray(new String[enabledProtocolsList.size()]); + } + + if 
(configs.containsKey(SSLConfigs.SSL_ENDPOINT_IDENTIFICATION_ALGORITHM_CONFIG)) { + this.endpointIdentification = (String) configs.get(SSLConfigs.SSL_ENDPOINT_IDENTIFICATION_ALGORITHM_CONFIG); + } + + if (configs.containsKey(SSLConfigs.SSL_CLIENT_AUTH_CONFIG)) { + String clientAuthConfig = (String) configs.get(SSLConfigs.SSL_CLIENT_AUTH_CONFIG); + if (clientAuthConfig.equals("required")) + this.needClientAuth = true; + else if (clientAuthConfig.equals("requested")) + this.wantClientAuth = true; + } + + this.kmfAlgorithm = (String) configs.get(SSLConfigs.SSL_KEYMANAGER_ALGORITHM_CONFIG); + this.tmfAlgorithm = (String) configs.get(SSLConfigs.SSL_TRUSTMANAGER_ALGORITHM_CONFIG); + + if (checkKeyStoreConfigs(configs)) { + createKeystore((String) configs.get(SSLConfigs.SSL_KEYSTORE_TYPE_CONFIG), + (String) configs.get(SSLConfigs.SSL_KEYSTORE_LOCATION_CONFIG), + (String) configs.get(SSLConfigs.SSL_KEYSTORE_PASSWORD_CONFIG), + (String) configs.get(SSLConfigs.SSL_KEY_PASSWORD_CONFIG)); + } + + createTruststore((String) configs.get(SSLConfigs.SSL_TRUSTSTORE_TYPE_CONFIG), + (String) configs.get(SSLConfigs.SSL_TRUSTSTORE_LOCATION_CONFIG), + (String) configs.get(SSLConfigs.SSL_TRUSTSTORE_PASSWORD_CONFIG)); + try { + this.sslContext = createSSLContext(); + } catch (Exception e) { + throw new KafkaException(e); + } + } + + + private SSLContext createSSLContext() throws GeneralSecurityException, IOException { + SSLContext sslContext; + if (provider != null) + sslContext = SSLContext.getInstance(protocol, provider); + else + sslContext = SSLContext.getInstance(protocol); + + KeyManager[] keyManagers = null; + if (keystore != null) { + String kmfAlgorithm = this.kmfAlgorithm != null ? this.kmfAlgorithm : KeyManagerFactory.getDefaultAlgorithm(); + KeyManagerFactory kmf = KeyManagerFactory.getInstance(kmfAlgorithm); + KeyStore ks = keystore.load(); + String keyPassword = this.keyPassword != null ? this.keyPassword : keystore.password; + kmf.init(ks, keyPassword.toCharArray()); + keyManagers = kmf.getKeyManagers(); + } + + String tmfAlgorithm = this.tmfAlgorithm != null ? this.tmfAlgorithm : TrustManagerFactory.getDefaultAlgorithm(); + TrustManagerFactory tmf = TrustManagerFactory.getInstance(tmfAlgorithm); + KeyStore ts = truststore == null ? null : truststore.load(); + tmf.init(ts); + + sslContext.init(keyManagers, tmf.getTrustManagers(), null); + return sslContext; + } + + public SSLEngine createSSLEngine(String peerHost, int peerPort) { + SSLEngine sslEngine = sslContext.createSSLEngine(peerHost, peerPort); + if (cipherSuites != null) sslEngine.setEnabledCipherSuites(cipherSuites); + if (enabledProtocols != null) sslEngine.setEnabledProtocols(enabledProtocols); + + if (mode == Mode.SERVER) { + sslEngine.setUseClientMode(false); + if (needClientAuth) + sslEngine.setNeedClientAuth(needClientAuth); + else + sslEngine.setWantClientAuth(wantClientAuth); + } else { + sslEngine.setUseClientMode(true); + SSLParameters sslParams = sslEngine.getSSLParameters(); + sslParams.setEndpointIdentificationAlgorithm(endpointIdentification); + sslEngine.setSSLParameters(sslParams); + } + return sslEngine; + } + + /** + * Returns a configured SSLContext. + * @return SSLContext. 
+ */ + public SSLContext sslContext() { + return sslContext; + } + + private void createKeystore(String type, String path, String password, String keyPassword) { + if (path == null && password != null) { + throw new KafkaException("SSL key store is not specified, but key store password is specified."); + } else if (path != null && password == null) { + throw new KafkaException("SSL key store is specified, but key store password is not specified."); + } else if (path != null && password != null) { + this.keystore = new SecurityStore(type, path, password); + this.keyPassword = keyPassword; + } + } + + private void createTruststore(String type, String path, String password) { + if (path == null && password != null) { + throw new KafkaException("SSL trust store is not specified, but trust store password is specified."); + } else if (path != null && password == null) { + throw new KafkaException("SSL trust store is specified, but trust store password is not specified."); + } else if (path != null && password != null) { + this.truststore = new SecurityStore(type, path, password); + } + } + + private boolean checkKeyStoreConfigs(Map configs) { + return configs.containsKey(SSLConfigs.SSL_KEYSTORE_TYPE_CONFIG) && + configs.containsKey(SSLConfigs.SSL_KEYSTORE_LOCATION_CONFIG) && + configs.containsKey(SSLConfigs.SSL_KEYSTORE_PASSWORD_CONFIG) && + configs.containsKey(SSLConfigs.SSL_KEY_PASSWORD_CONFIG); + } + + private class SecurityStore { + private final String type; + private final String path; + private final String password; + + private SecurityStore(String type, String path, String password) { + this.type = type == null ? KeyStore.getDefaultType() : type; + this.path = path; + this.password = password; + } + + private KeyStore load() throws GeneralSecurityException, IOException { + FileInputStream in = null; + try { + KeyStore ks = KeyStore.getInstance(type); + in = new FileInputStream(path); + ks.load(in, password.toCharArray()); + return ks; + } finally { + if (in != null) in.close(); + } + } + } + +} diff --git a/clients/src/test/java/org/apache/kafka/common/network/EchoServer.java b/clients/src/test/java/org/apache/kafka/common/network/EchoServer.java index 37d1706..f13c21a 100644 --- a/clients/src/test/java/org/apache/kafka/common/network/EchoServer.java +++ b/clients/src/test/java/org/apache/kafka/common/network/EchoServer.java @@ -13,6 +13,7 @@ package org.apache.kafka.common.network; import org.apache.kafka.common.protocol.SecurityProtocol; +import org.apache.kafka.common.security.ssl.SSLFactory; import javax.net.ssl.SSLContext; import javax.net.ssl.SSLSocket; diff --git a/clients/src/test/java/org/apache/kafka/common/network/SSLFactoryTest.java b/clients/src/test/java/org/apache/kafka/common/network/SSLFactoryTest.java deleted file mode 100644 index a2cf302..0000000 --- a/clients/src/test/java/org/apache/kafka/common/network/SSLFactoryTest.java +++ /dev/null @@ -1,60 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE - * file distributed with this work for additional information regarding copyright ownership. The ASF licenses this file - * to You under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the - * License.
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on - * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the - * specific language governing permissions and limitations under the License. - */ -package org.apache.kafka.common.network; - -import javax.net.ssl.*; - -import java.io.File; -import java.util.Map; - -import org.apache.kafka.test.TestSSLUtils; - -import org.junit.Test; -import static org.junit.Assert.assertNotNull; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertArrayEquals; -import static org.junit.Assert.assertTrue; - - -/** - * A set of tests for the selector over ssl. These use a test harness that runs a simple socket server that echos back responses. - */ - -public class SSLFactoryTest { - - @Test - public void testSSLFactoryConfiguration() throws Exception { - File trustStoreFile = File.createTempFile("truststore", ".jks"); - Map serverSSLConfig = TestSSLUtils.createSSLConfig(false, true, SSLFactory.Mode.SERVER, trustStoreFile, "server"); - SSLFactory sslFactory = new SSLFactory(SSLFactory.Mode.SERVER); - sslFactory.configure(serverSSLConfig); - //host and port are hints - SSLEngine engine = sslFactory.createSSLEngine("localhost", 0); - assertNotNull(engine); - String[] expectedProtocols = {"TLSv1.2"}; - assertArrayEquals(expectedProtocols, engine.getEnabledProtocols()); - assertEquals(false, engine.getUseClientMode()); - } - - @Test - public void testClientMode() throws Exception { - File trustStoreFile = File.createTempFile("truststore", ".jks"); - Map clientSSLConfig = TestSSLUtils.createSSLConfig(false, true, SSLFactory.Mode.CLIENT, trustStoreFile, "client"); - SSLFactory sslFactory = new SSLFactory(SSLFactory.Mode.CLIENT); - sslFactory.configure(clientSSLConfig); - //host and port are hints - SSLEngine engine = sslFactory.createSSLEngine("localhost", 0); - assertTrue(engine.getUseClientMode()); - } - -} diff --git a/clients/src/test/java/org/apache/kafka/common/network/SSLSelectorTest.java b/clients/src/test/java/org/apache/kafka/common/network/SSLSelectorTest.java index df6279b..df1205c 100644 --- a/clients/src/test/java/org/apache/kafka/common/network/SSLSelectorTest.java +++ b/clients/src/test/java/org/apache/kafka/common/network/SSLSelectorTest.java @@ -24,6 +24,7 @@ import java.net.InetSocketAddress; import java.nio.ByteBuffer; import org.apache.kafka.common.config.SSLConfigs; +import org.apache.kafka.common.security.ssl.SSLFactory; import org.apache.kafka.common.metrics.Metrics; import org.apache.kafka.common.utils.MockTime; import org.apache.kafka.common.utils.Utils; diff --git a/clients/src/test/java/org/apache/kafka/common/network/SelectorTest.java b/clients/src/test/java/org/apache/kafka/common/network/SelectorTest.java index 8ec5bed..3a684d9 100644 --- a/clients/src/test/java/org/apache/kafka/common/network/SelectorTest.java +++ b/clients/src/test/java/org/apache/kafka/common/network/SelectorTest.java @@ -52,7 +52,7 @@ public class SelectorTest { this.server = new EchoServer(configs); this.server.start(); this.time = new MockTime(); - this.channelBuilder = new PlainTextChannelBuilder(); + this.channelBuilder = new PlaintextChannelBuilder(); this.channelBuilder.configure(configs); this.selector = new Selector(5000, new Metrics(), time, "MetricGroup", new LinkedHashMap(), channelBuilder); } diff --git 
a/clients/src/test/java/org/apache/kafka/common/security/ssl/SSLFactoryTest.java b/clients/src/test/java/org/apache/kafka/common/security/ssl/SSLFactoryTest.java new file mode 100644 index 0000000..0aec666 --- /dev/null +++ b/clients/src/test/java/org/apache/kafka/common/security/ssl/SSLFactoryTest.java @@ -0,0 +1,60 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE + * file distributed with this work for additional information regarding copyright ownership. The ASF licenses this file + * to You under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the + * License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on + * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + */ +package org.apache.kafka.common.security.ssl; + +import javax.net.ssl.*; + +import java.io.File; +import java.util.Map; + +import org.apache.kafka.test.TestSSLUtils; + +import org.junit.Test; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertArrayEquals; +import static org.junit.Assert.assertTrue; + + +/** + * A set of tests for SSLFactory configuration in client and server mode. + */ + +public class SSLFactoryTest { + + @Test + public void testSSLFactoryConfiguration() throws Exception { + File trustStoreFile = File.createTempFile("truststore", ".jks"); + Map serverSSLConfig = TestSSLUtils.createSSLConfig(false, true, SSLFactory.Mode.SERVER, trustStoreFile, "server"); + SSLFactory sslFactory = new SSLFactory(SSLFactory.Mode.SERVER); + sslFactory.configure(serverSSLConfig); + //host and port are hints + SSLEngine engine = sslFactory.createSSLEngine("localhost", 0); + assertNotNull(engine); + String[] expectedProtocols = {"TLSv1.2"}; + assertArrayEquals(expectedProtocols, engine.getEnabledProtocols()); + assertEquals(false, engine.getUseClientMode()); + } + + @Test + public void testClientMode() throws Exception { + File trustStoreFile = File.createTempFile("truststore", ".jks"); + Map clientSSLConfig = TestSSLUtils.createSSLConfig(false, true, SSLFactory.Mode.CLIENT, trustStoreFile, "client"); + SSLFactory sslFactory = new SSLFactory(SSLFactory.Mode.CLIENT); + sslFactory.configure(clientSSLConfig); + //host and port are hints + SSLEngine engine = sslFactory.createSSLEngine("localhost", 0); + assertTrue(engine.getUseClientMode()); + } + +} diff --git a/clients/src/test/java/org/apache/kafka/test/TestSSLUtils.java b/clients/src/test/java/org/apache/kafka/test/TestSSLUtils.java index 601e05d..c01cf37 100644 --- a/clients/src/test/java/org/apache/kafka/test/TestSSLUtils.java +++ b/clients/src/test/java/org/apache/kafka/test/TestSSLUtils.java @@ -18,7 +18,7 @@ package org.apache.kafka.test; import org.apache.kafka.common.config.SSLConfigs; -import org.apache.kafka.common.network.SSLFactory; +import org.apache.kafka.common.security.ssl.SSLFactory; import org.apache.kafka.clients.CommonClientConfigs; import java.io.File; diff --git a/core/src/main/scala/kafka/network/SocketServer.scala b/core/src/main/scala/kafka/network/SocketServer.scala index 0b44d57..649812d
100644 --- a/core/src/main/scala/kafka/network/SocketServer.scala +++ b/core/src/main/scala/kafka/network/SocketServer.scala @@ -33,7 +33,8 @@ import kafka.utils._ import org.apache.kafka.common.MetricName import org.apache.kafka.common.metrics._ import org.apache.kafka.common.network.{InvalidReceiveException, ChannelBuilder, - PlainTextChannelBuilder, SSLChannelBuilder, SSLFactory} + PlaintextChannelBuilder, SSLChannelBuilder} +import org.apache.kafka.common.security.ssl.SSLFactory import org.apache.kafka.common.protocol.SecurityProtocol import org.apache.kafka.common.protocol.types.SchemaException import org.apache.kafka.common.utils.{SystemTime, Time, Utils} @@ -113,7 +114,7 @@ class SocketServer(val config: KafkaConfig, val metrics: Metrics, val time: Time } // register the processor threads for notification of responses - requestChannel.addResponseListener((id:Int) => processors(id).wakeup()) + requestChannel.addResponseListener(id => processors(id).wakeup()) /** * Shutdown the socket server @@ -499,8 +500,8 @@ private[kafka] class Processor(val id: Int, } private def createChannelBuilder(): ChannelBuilder = { - val channelBuilder:ChannelBuilder = if (protocol == SecurityProtocol.SSL) new SSLChannelBuilder(SSLFactory.Mode.SERVER) - else new PlainTextChannelBuilder() + val channelBuilder: ChannelBuilder = if (protocol == SecurityProtocol.SSL) new SSLChannelBuilder(SSLFactory.Mode.SERVER) + else new PlaintextChannelBuilder() channelBuilder.configure(channelConfigs) channelBuilder diff --git a/core/src/test/scala/integration/kafka/api/SSLConsumerTest.scala b/core/src/test/scala/integration/kafka/api/SSLConsumerTest.scala index 0952b5f..d62bf29 100644 --- a/core/src/test/scala/integration/kafka/api/SSLConsumerTest.scala +++ b/core/src/test/scala/integration/kafka/api/SSLConsumerTest.scala @@ -199,41 +199,6 @@ class SSLConsumerTest extends KafkaServerTestHarness with Logging { assertNull(this.consumers(0).partitionsFor("non-exist-topic")) } - def testPartitionReassignmentCallback() { - val callback = new TestConsumerReassignmentCallback() - val consumer0 = TestUtils.createNewConsumer(TestUtils.getSSLBrokerListStrFromServers(servers), - groupId = "my-test", - partitionAssignmentStrategy= "range", - sessionTimeout = 200, - callback = Some(callback), - enableSSL=true, - trustStoreFile=Some(trustStoreFile)) - consumer0.subscribe(topic) - // the initial subscription should cause a callback execution - while (callback.callsToAssigned == 0) - consumer0.poll(50) - - // get metadata for the topic - var parts = consumer0.partitionsFor(ConsumerCoordinator.OffsetsTopicName) - while (parts == null) - parts = consumer0.partitionsFor(ConsumerCoordinator.OffsetsTopicName) - assertEquals(1, parts.size) - assertNotNull(parts(0).leader()) - - // shutdown the coordinator - val coordinator = parts(0).leader().id() - this.servers(coordinator).shutdown() - - // this should cause another callback execution - while (callback.callsToAssigned < 2) - consumer0.poll(50) - assertEquals(2, callback.callsToAssigned) - assertEquals(2, callback.callsToRevoked) - - consumer0.close() - } - - private class TestConsumerReassignmentCallback extends ConsumerRebalanceCallback { var callsToAssigned = 0 var callsToRevoked = 0 diff --git a/core/src/test/scala/integration/kafka/api/SSLProducerSendTest.scala b/core/src/test/scala/integration/kafka/api/SSLProducerSendTest.scala index e5861ae..2f7ab20 100644 --- a/core/src/test/scala/integration/kafka/api/SSLProducerSendTest.scala +++ 
b/core/src/test/scala/integration/kafka/api/SSLProducerSendTest.scala @@ -47,7 +47,7 @@ class SSLProducerSendTest extends JUnit3Suite with KafkaServerTestHarness { private var consumer2: SimpleConsumer = null private val topic = "topic" - private val numRecords = 10 + private val numRecords = 100 override def setUp() { super.setUp() @@ -72,7 +72,8 @@ class SSLProducerSendTest extends JUnit3Suite with KafkaServerTestHarness { */ @Test def testSendOffset() { - var producer = TestUtils.createNewProducer(TestUtils.getSSLBrokerListStrFromServers(servers), enableSSL=true, trustStoreFile=Some(trustStoreFile)) + var sslProducer = TestUtils.createNewProducer(TestUtils.getSSLBrokerListStrFromServers(servers), enableSSL=true, trustStoreFile=Some(trustStoreFile)) + var producer = TestUtils.createNewProducer(TestUtils.getBrokerListStrFromServers(servers)) val partition = new Integer(0) object callback extends Callback { @@ -95,43 +96,53 @@ class SSLProducerSendTest extends JUnit3Suite with KafkaServerTestHarness { // send a normal record val record0 = new ProducerRecord[Array[Byte],Array[Byte]](topic, partition, "key".getBytes, "value".getBytes) - assertEquals("Should have offset 0", 0L, producer.send(record0, callback).get.offset) + assertEquals("Should have offset 0", 0L, sslProducer.send(record0, callback).get.offset) // send a record with null value should be ok val record1 = new ProducerRecord[Array[Byte],Array[Byte]](topic, partition, "key".getBytes, null) - assertEquals("Should have offset 1", 1L, producer.send(record1, callback).get.offset) + assertEquals("Should have offset 1", 1L, sslProducer.send(record1, callback).get.offset) // send a record with null key should be ok val record2 = new ProducerRecord[Array[Byte],Array[Byte]](topic, partition, null, "value".getBytes) - assertEquals("Should have offset 2", 2L, producer.send(record2, callback).get.offset) + assertEquals("Should have offset 2", 2L, sslProducer.send(record2, callback).get.offset) // send a record with null part id should be ok val record3 = new ProducerRecord[Array[Byte],Array[Byte]](topic, null, "key".getBytes, "value".getBytes) - assertEquals("Should have offset 3", 3L, producer.send(record3, callback).get.offset) - + assertEquals("Should have offset 3", 3L, sslProducer.send(record3, callback).get.offset) // send a record with null topic should fail try { val record4 = new ProducerRecord[Array[Byte],Array[Byte]](null, partition, "key".getBytes, "value".getBytes) - producer.send(record4, callback) + sslProducer.send(record4, callback) fail("Should not allow sending a record without topic") } catch { case iae: IllegalArgumentException => // this is ok case e: Throwable => fail("Only expecting IllegalArgumentException", e) } - // non-blocking send a list of records + // non-blocking send a list of records with sslProducer + for (i <- 1 to numRecords) + sslProducer.send(record0, callback) + // check that all messages have been acked via offset + assertEquals("Should have offset " + numRecords + 4L, numRecords + 4L, sslProducer.send(record0, callback).get.offset) + + //non-blocking send a list of records with plaintext producer for (i <- 1 to numRecords) producer.send(record0, callback) // check that all messages have been acked via offset - assertEquals("Should have offset " + (numRecords + 4), numRecords + 4L, producer.send(record0, callback).get.offset) + assertEquals("Should have offset " + (numRecords * 2 + 5L), numRecords * 2 + 5L, producer.send(record0, callback).get.offset) } finally { + if (sslProducer != null) { + 
sslProducer.close() + sslProducer = null + } if (producer != null) { producer.close() producer = null } + } } @@ -223,137 +234,4 @@ class SSLProducerSendTest extends JUnit3Suite with KafkaServerTestHarness { } } } - - /** - * testAutoCreateTopic - * - * The topic should be created upon sending the first message - */ - @Test - def testAutoCreateTopic() { - var producer = TestUtils.createNewProducer(TestUtils.getSSLBrokerListStrFromServers(servers), enableSSL=true, trustStoreFile=Some(trustStoreFile), retries = 5) - - try { - // Send a message to auto-create the topic - val record = new ProducerRecord[Array[Byte],Array[Byte]](topic, null, "key".getBytes, "value".getBytes) - assertEquals("Should have offset 0", 0L, producer.send(record).get.offset) - - // double check that the topic is created with leader elected - TestUtils.waitUntilLeaderIsElectedOrChanged(zkClient, topic, 0) - - } finally { - if (producer != null) { - producer.close() - producer = null - } - } - } - - - /** - * Test close with zero timeout from caller thread - */ - @Test - def testCloseWithZeroTimeoutFromCallerThread() { - var producer: KafkaProducer[Array[Byte],Array[Byte]] = null - try { - // create topic - val leaders = TestUtils.createTopic(zkClient, topic, 2, 2, servers) - val leader0 = leaders(0) - val leader1 = leaders(1) - - // create record - val record0 = new ProducerRecord[Array[Byte], Array[Byte]](topic, 0, null, "value".getBytes) - val record1 = new ProducerRecord[Array[Byte], Array[Byte]](topic, 1, null, "value".getBytes) - - // Test closing from caller thread. - for (i <- 0 until 50) { - producer = TestUtils.createNewProducer(TestUtils.getSSLBrokerListStrFromServers(servers), enableSSL=true, trustStoreFile=Some(trustStoreFile), lingerMs = Long.MaxValue) - val responses = (0 until numRecords) map (i => producer.send(record0)) - assertTrue("No request is complete.", responses.forall(!_.isDone())) - producer.close(0, TimeUnit.MILLISECONDS) - responses.foreach { future => - try { - future.get() - fail("No message should be sent successfully.") - } catch { - case e: Exception => - assertEquals("java.lang.IllegalStateException: Producer is closed forcefully.", e.getMessage) - } - } - val fetchResponse = if (leader0.get == configs(0).brokerId) { - consumer1.fetch(new FetchRequestBuilder().addFetch(topic, 0, 0, Int.MaxValue).build()) - } else { - consumer2.fetch(new FetchRequestBuilder().addFetch(topic, 0, 0, Int.MaxValue).build()) - } - assertEquals("Fetch response should have no message returned.", 0, fetchResponse.messageSet(topic, 0).size) - } - } finally { - if (producer != null) - producer.close() - } - } - - /** - * Test close with zero and non-zero timeout from sender thread - */ - @Test - def testCloseWithZeroTimeoutFromSenderThread() { - var producer: KafkaProducer[Array[Byte],Array[Byte]] = null - try { - // create topic - val leaders = TestUtils.createTopic(zkClient, topic, 2, 2, servers) - val leader0 = leaders(0) - val leader1 = leaders(1) - - // create record - val record0 = new ProducerRecord[Array[Byte], Array[Byte]](topic, 0, null, "value".getBytes) - val record1 = new ProducerRecord[Array[Byte], Array[Byte]](topic, 1, null, "value".getBytes) - - // Test closing from sender thread. - class CloseCallback(producer: KafkaProducer[Array[Byte], Array[Byte]]) extends Callback { - override def onCompletion(metadata: RecordMetadata, exception: Exception) { - // Trigger another batch in accumulator before close the producer. These messages should - // not be sent. 
- (0 until numRecords) map (i => producer.send(record1)) - // The close call will be called by all the message callbacks. This tests idempotence of the close call. - producer.close(0, TimeUnit.MILLISECONDS) - // Test close with non zero timeout. Should not block at all. - producer.close(Long.MaxValue, TimeUnit.MICROSECONDS) - } - } - - for (i <- 0 until 50) { - //producer = TestUtils.createNewProducer(brokerList, lingerMs = Long.MaxValue) - producer = TestUtils.createNewProducer(TestUtils.getSSLBrokerListStrFromServers(servers), enableSSL=true, trustStoreFile=Some(trustStoreFile), lingerMs = Long.MaxValue) - // send message to partition 0 - var responses = (0 until numRecords) map (i => producer.send(record0)) - // send message to partition 1 - responses ++= ((0 until numRecords) map (i => producer.send(record1))) - assertTrue("No request is complete.", responses.forall(!_.isDone())) - // flush the messages. - producer.flush() - assertTrue("All request are complete.", responses.forall(_.isDone())) - // Check the messages received by broker. - val fetchResponse0 = if (leader0.get == configs(0).brokerId) { - consumer1.fetch(new FetchRequestBuilder().addFetch(topic, 0, 0, Int.MaxValue).build()) - } else { - consumer2.fetch(new FetchRequestBuilder().addFetch(topic, 0, 0, Int.MaxValue).build()) - } - val fetchResponse1 = if (leader1.get == configs(0).brokerId) { - consumer1.fetch(new FetchRequestBuilder().addFetch(topic, 1, 0, Int.MaxValue).build()) - } else { - consumer2.fetch(new FetchRequestBuilder().addFetch(topic, 1, 0, Int.MaxValue).build()) - } - val expectedNumRecords = (i + 1) * numRecords - assertEquals("Fetch response to partition 1 should have %d messages.".format(expectedNumRecords), - expectedNumRecords, fetchResponse0.messageSet(topic, 0).size) - assertEquals("Fetch response to partition 1 should have %d messages.".format(expectedNumRecords), - expectedNumRecords, fetchResponse1.messageSet(topic, 1).size) - } - } finally { - if (producer != null) - producer.close() - } - } } diff --git a/core/src/test/scala/unit/kafka/utils/TestUtils.scala b/core/src/test/scala/unit/kafka/utils/TestUtils.scala index 39dadc5..9f00df1 100755 --- a/core/src/test/scala/unit/kafka/utils/TestUtils.scala +++ b/core/src/test/scala/unit/kafka/utils/TestUtils.scala @@ -52,7 +52,7 @@ import org.apache.kafka.clients.consumer.KafkaConsumer import org.apache.kafka.clients.consumer.ConsumerRebalanceCallback import org.apache.kafka.common.serialization.ByteArrayDeserializer import org.apache.kafka.clients.CommonClientConfigs -import org.apache.kafka.common.network.SSLFactory +import org.apache.kafka.common.security.ssl.SSLFactory import org.apache.kafka.common.config.SSLConfigs import org.apache.kafka.test.TestSSLUtils -- 2.4.6 From e9c1b5640e3853cf42e969b2a0a2fffa0ae90bf2 Mon Sep 17 00:00:00 2001 From: Sriharsha Chintalapani Date: Sun, 16 Aug 2015 20:39:54 -0700 Subject: [PATCH 30/30] KAFKA-1690. Add SSL support to Kafka Broker, Producer & Client. --- build.gradle | 4 ---- 1 file changed, 4 deletions(-) diff --git a/build.gradle b/build.gradle index 17fc223..a429fd1 100644 --- a/build.gradle +++ b/build.gradle @@ -114,10 +114,6 @@ subprojects { } } - tasks.withType(Test) { - maxParallelForks = Runtime.runtime.availableProcessors() - } - jar { from '../LICENSE' from '../NOTICE' -- 2.4.6
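To make the relocated SSLFactoryTest above concrete, here is a minimal client-side sketch that drives org.apache.kafka.common.security.ssl.SSLFactory by hand rather than through TestSSLUtils.createSSLConfig. The ssl.* property names, the truststore path, and the password are assumptions in the style of the SSLConfigs keys this series introduces, not values quoted from the patch.

    import java.util.HashMap;
    import java.util.Map;
    import javax.net.ssl.SSLEngine;

    import org.apache.kafka.common.security.ssl.SSLFactory;

    public class SSLFactoryClientSketch {
        public static void main(String[] args) throws Exception {
            // Hypothetical client-side settings; key names assume the ssl.* style
            // used by SSLConfigs elsewhere in this patch series.
            Map<String, Object> sslConfig = new HashMap<>();
            sslConfig.put("ssl.protocol", "TLSv1.2");
            sslConfig.put("ssl.truststore.type", "JKS");
            sslConfig.put("ssl.truststore.location", "/path/to/truststore.jks");
            sslConfig.put("ssl.truststore.password", "truststore-password");

            // Same calls as SSLFactoryTest: build a client-mode factory and
            // create an engine; host and port are only hints for session reuse.
            SSLFactory sslFactory = new SSLFactory(SSLFactory.Mode.CLIENT);
            sslFactory.configure(sslConfig);
            SSLEngine engine = sslFactory.createSSLEngine("localhost", 9093);
            System.out.println("client mode: " + engine.getUseClientMode());
        }
    }

A server-mode factory is built the same way with SSLFactory.Mode.SERVER, but additionally needs keystore settings, which is what the SERVER branch of the test exercises through TestSSLUtils.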
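The SelectorTest and SocketServer hunks above both hinge on picking a ChannelBuilder per security protocol: plaintext listeners get a PlaintextChannelBuilder, SSL listeners an SSLChannelBuilder in server mode. The sketch below restates that selection in Java, assuming the constructor and configure signatures visible in the diffs; the empty config map is only adequate for the plaintext branch.

    import java.util.LinkedHashMap;
    import java.util.Map;

    import org.apache.kafka.common.metrics.Metrics;
    import org.apache.kafka.common.network.ChannelBuilder;
    import org.apache.kafka.common.network.PlaintextChannelBuilder;
    import org.apache.kafka.common.network.SSLChannelBuilder;
    import org.apache.kafka.common.network.Selector;
    import org.apache.kafka.common.protocol.SecurityProtocol;
    import org.apache.kafka.common.security.ssl.SSLFactory;
    import org.apache.kafka.common.utils.SystemTime;

    public class ChannelBuilderSketch {
        // Mirrors SocketServer.createChannelBuilder: SSL listeners get a
        // server-mode SSLChannelBuilder, everything else stays plaintext.
        static ChannelBuilder channelBuilderFor(SecurityProtocol protocol, Map<String, ?> configs) {
            ChannelBuilder builder = (protocol == SecurityProtocol.SSL)
                ? new SSLChannelBuilder(SSLFactory.Mode.SERVER)
                : new PlaintextChannelBuilder();
            builder.configure(configs);
            return builder;
        }

        public static void main(String[] args) throws Exception {
            // Same Selector wiring as SelectorTest.setup(), with an empty tag map.
            ChannelBuilder builder =
                channelBuilderFor(SecurityProtocol.PLAINTEXT, new LinkedHashMap<String, Object>());
            Selector selector = new Selector(5000, new Metrics(), new SystemTime(), "MetricGroup",
                new LinkedHashMap<String, String>(), builder);
            selector.close();
        }
    }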
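The rewritten testSendOffset interleaves an SSL producer and a plaintext producer and checks that offsets keep advancing across both. As a rough illustration of what the SSL side of TestUtils.createNewProducer(..., enableSSL=true, trustStoreFile=...) is expected to assemble, the sketch below configures a producer against an assumed SSL listener on localhost:9093; the security.protocol and ssl.truststore.* keys are assumptions based on the client configs added elsewhere in this series, not values quoted from the patch.

    import java.util.Properties;

    import org.apache.kafka.clients.producer.KafkaProducer;
    import org.apache.kafka.clients.producer.ProducerRecord;
    import org.apache.kafka.common.serialization.ByteArraySerializer;

    public class SSLProducerSketch {
        public static void main(String[] args) throws Exception {
            Properties props = new Properties();
            // Hypothetical SSL listener address; the test derives it from
            // TestUtils.getSSLBrokerListStrFromServers(servers).
            props.put("bootstrap.servers", "localhost:9093");
            props.put("security.protocol", "SSL");                            // assumed key
            props.put("ssl.truststore.location", "/path/to/truststore.jks");  // assumed key
            props.put("ssl.truststore.password", "truststore-password");      // assumed key
            props.put("key.serializer", ByteArraySerializer.class.getName());
            props.put("value.serializer", ByteArraySerializer.class.getName());

            KafkaProducer<byte[], byte[]> producer = new KafkaProducer<>(props);
            try {
                // Offsets come back in send order, which is what the
                // "Should have offset N" assertions in testSendOffset rely on.
                long offset = producer.send(new ProducerRecord<byte[], byte[]>(
                        "topic", 0, "key".getBytes(), "value".getBytes())).get().offset();
                System.out.println("first record landed at offset " + offset);
            } finally {
                producer.close();
            }
        }
    }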