***************************************************** Summary ------- Generated at: 2013-06-13T13:53:12-04:00 Notes: 11 Binaries: 0 Archives: 7 Standards: 595 Apache Licensed: 387 Generated Documents: 0 JavaDocs are generated and so a license header is optional Generated files do not require license headers 208 Unknown Licenses ******************************* Unapproved licenses: ./.gitignore ./.rat-excludes ./README.md ./contrib/hadoop-consumer/build.sbt ./contrib/hadoop-producer/README.md ./contrib/hadoop-producer/build.sbt ./core/build.sbt ./core/src/main/scala/kafka/admin/CheckReassignmentStatus.scala ./core/src/main/scala/kafka/api/ApiUtils.scala ./core/src/main/scala/kafka/api/UpdateMetadataRequest.scala ./core/src/main/scala/kafka/client/ClientUtils.scala ./core/src/main/scala/kafka/common/BrokerNotExistException.scala ./core/src/main/scala/kafka/consumer/ConsumerTopicStat.scala ./core/src/main/scala/kafka/consumer/package.html ./core/src/main/scala/kafka/log/LogSegment.scala ./core/src/main/scala/kafka/log/package.html ./core/src/main/scala/kafka/message/package.html ./core/src/main/scala/kafka/network/package.html ./core/src/main/scala/kafka/producer/async/AsyncProducerStats.scala ./core/src/main/scala/kafka/server/package.html ./core/src/main/scala/kafka/utils/CommandLineUtils.scala ./core/src/main/scala/kafka/utils/FileLock.scala ./core/src/main/scala/kafka/utils/Json.scala ./core/src/main/scala/kafka/utils/Topic.scala ./core/src/main/scala/kafka/utils/package.html ./core/src/test/scala/unit/kafka/log/LogSegmentTest.scala ./examples/build.sbt ./perf/build.sbt ./project/plugins.sbt ./system_test/__init__.py ./system_test/cluster_config.json ./system_test/logging.conf ./system_test/metrics.json ./system_test/run_sanity.sh ./system_test/testcase_to_run.json ./system_test/testcase_to_run_all.json ./system_test/testcase_to_run_sanity.json ./system_test/testcase_to_skip.json ./system_test/migration_tool_testsuite/__init__.py 
./system_test/migration_tool_testsuite/cluster_config.json ./system_test/migration_tool_testsuite/testcase_9001/testcase_9001_properties.json ./system_test/migration_tool_testsuite/testcase_9003/cluster_config.json ./system_test/migration_tool_testsuite/testcase_9003/testcase_9003_properties.json ./system_test/migration_tool_testsuite/testcase_9004/cluster_config.json ./system_test/migration_tool_testsuite/testcase_9004/testcase_9004_properties.json ./system_test/migration_tool_testsuite/testcase_9005/cluster_config.json ./system_test/migration_tool_testsuite/testcase_9005/testcase_9005_properties.json ./system_test/migration_tool_testsuite/testcase_9006/cluster_config.json ./system_test/migration_tool_testsuite/testcase_9006/testcase_9006_properties.json ./system_test/mirror_maker/bin/expected.out ./system_test/mirror_maker_testsuite/__init__.py ./system_test/mirror_maker_testsuite/cluster_config.json ./system_test/mirror_maker_testsuite/config/console_consumer.properties ./system_test/mirror_maker_testsuite/config/consumer.properties ./system_test/mirror_maker_testsuite/config/log4j.properties ./system_test/mirror_maker_testsuite/config/mirror_consumer.properties ./system_test/mirror_maker_testsuite/config/mirror_producer.properties ./system_test/mirror_maker_testsuite/config/producer.properties ./system_test/mirror_maker_testsuite/config/producer_performance.properties ./system_test/mirror_maker_testsuite/testcase_5001/testcase_5001_properties.json ./system_test/mirror_maker_testsuite/testcase_5002/testcase_5002_properties.json ./system_test/mirror_maker_testsuite/testcase_5003/cluster_config.json ./system_test/mirror_maker_testsuite/testcase_5003/testcase_5003_properties.json ./system_test/mirror_maker_testsuite/testcase_5004/cluster_config.json ./system_test/mirror_maker_testsuite/testcase_5004/testcase_5004_properties.json ./system_test/mirror_maker_testsuite/testcase_5005/cluster_config.json 
./system_test/mirror_maker_testsuite/testcase_5005/testcase_5005_properties.json ./system_test/mirror_maker_testsuite/testcase_5006/cluster_config.json ./system_test/mirror_maker_testsuite/testcase_5006/testcase_5006_properties.json ./system_test/producer_perf/bin/expected.out ./system_test/replication_testsuite/__init__.py ./system_test/replication_testsuite/config/console_consumer.properties ./system_test/replication_testsuite/config/consumer.properties ./system_test/replication_testsuite/config/log4j.properties ./system_test/replication_testsuite/config/producer.properties ./system_test/replication_testsuite/config/producer_performance.properties ./system_test/replication_testsuite/testcase_0001/testcase_0001_properties.json ./system_test/replication_testsuite/testcase_0002/testcase_0002_properties.json ./system_test/replication_testsuite/testcase_0003/testcase_0003_properties.json ./system_test/replication_testsuite/testcase_0004/testcase_0004_properties.json ./system_test/replication_testsuite/testcase_0005/testcase_0005_properties.json ./system_test/replication_testsuite/testcase_0006/testcase_0006_properties.json ./system_test/replication_testsuite/testcase_0007/testcase_0007_properties.json ./system_test/replication_testsuite/testcase_0008/testcase_0008_properties.json ./system_test/replication_testsuite/testcase_0009/testcase_0009_properties.json ./system_test/replication_testsuite/testcase_0010/testcase_0010_properties.json ./system_test/replication_testsuite/testcase_0011/testcase_0011_properties.json ./system_test/replication_testsuite/testcase_0021/cluster_config.json ./system_test/replication_testsuite/testcase_0021/testcase_0021_properties.json ./system_test/replication_testsuite/testcase_0022/cluster_config.json ./system_test/replication_testsuite/testcase_0022/testcase_0022_properties.json ./system_test/replication_testsuite/testcase_0023/cluster_config.json ./system_test/replication_testsuite/testcase_0023/testcase_0023_properties.json 
./system_test/replication_testsuite/testcase_0024/testcase_0024_properties.json ./system_test/replication_testsuite/testcase_0101/testcase_0101_properties.json ./system_test/replication_testsuite/testcase_0102/testcase_0102_properties.json ./system_test/replication_testsuite/testcase_0103/testcase_0103_properties.json ./system_test/replication_testsuite/testcase_0104/testcase_0104_properties.json ./system_test/replication_testsuite/testcase_0105/testcase_0105_properties.json ./system_test/replication_testsuite/testcase_0106/testcase_0106_properties.json ./system_test/replication_testsuite/testcase_0107/testcase_0107_properties.json ./system_test/replication_testsuite/testcase_0108/testcase_0108_properties.json ./system_test/replication_testsuite/testcase_0109/testcase_0109_properties.json ./system_test/replication_testsuite/testcase_0110/testcase_0110_properties.json ./system_test/replication_testsuite/testcase_0111/testcase_0111_properties.json ./system_test/replication_testsuite/testcase_0112/testcase_0112_properties.json ./system_test/replication_testsuite/testcase_0113/testcase_0113_properties.json ./system_test/replication_testsuite/testcase_0114/testcase_0114_properties.json ./system_test/replication_testsuite/testcase_0115/testcase_0115_properties.json ./system_test/replication_testsuite/testcase_0116/testcase_0116_properties.json ./system_test/replication_testsuite/testcase_0117/testcase_0117_properties.json ./system_test/replication_testsuite/testcase_0118/testcase_0118_properties.json ./system_test/replication_testsuite/testcase_0119/testcase_0119_properties.json ./system_test/replication_testsuite/testcase_0121/cluster_config.json ./system_test/replication_testsuite/testcase_0121/testcase_0121_properties.json ./system_test/replication_testsuite/testcase_0122/cluster_config.json ./system_test/replication_testsuite/testcase_0122/testcase_0122_properties.json ./system_test/replication_testsuite/testcase_0123/cluster_config.json 
./system_test/replication_testsuite/testcase_0123/testcase_0123_properties.json ./system_test/replication_testsuite/testcase_0124/cluster_config.json ./system_test/replication_testsuite/testcase_0124/testcase_0124_properties.json ./system_test/replication_testsuite/testcase_0125/cluster_config.json ./system_test/replication_testsuite/testcase_0125/testcase_0125_properties.json ./system_test/replication_testsuite/testcase_0126/cluster_config.json ./system_test/replication_testsuite/testcase_0126/testcase_0126_properties.json ./system_test/replication_testsuite/testcase_0127/cluster_config.json ./system_test/replication_testsuite/testcase_0127/testcase_0127_properties.json ./system_test/replication_testsuite/testcase_0128/testcase_0128_properties.json ./system_test/replication_testsuite/testcase_0131/cluster_config.json ./system_test/replication_testsuite/testcase_0131/testcase_0131_properties.json ./system_test/replication_testsuite/testcase_0132/cluster_config.json ./system_test/replication_testsuite/testcase_0132/testcase_0132_properties.json ./system_test/replication_testsuite/testcase_0133/cluster_config.json ./system_test/replication_testsuite/testcase_0133/testcase_0133_properties.json ./system_test/replication_testsuite/testcase_0134/testcase_0134_properties.json ./system_test/replication_testsuite/testcase_0151/testcase_0151_properties.json ./system_test/replication_testsuite/testcase_0152/testcase_0152_properties.json ./system_test/replication_testsuite/testcase_0153/testcase_0153_properties.json ./system_test/replication_testsuite/testcase_0154/testcase_0154_properties.json ./system_test/replication_testsuite/testcase_0155/testcase_0155_properties.json ./system_test/replication_testsuite/testcase_0156/testcase_0156_properties.json ./system_test/replication_testsuite/testcase_0157/testcase_0157_properties.json ./system_test/replication_testsuite/testcase_0158/testcase_0158_properties.json 
./system_test/replication_testsuite/testcase_0159/testcase_0159_properties.json ./system_test/replication_testsuite/testcase_0201/testcase_0201_properties.json ./system_test/replication_testsuite/testcase_0202/testcase_0202_properties.json ./system_test/replication_testsuite/testcase_0203/testcase_0203_properties.json ./system_test/replication_testsuite/testcase_0204/testcase_0204_properties.json ./system_test/replication_testsuite/testcase_0205/testcase_0205_properties.json ./system_test/replication_testsuite/testcase_0206/testcase_0206_properties.json ./system_test/replication_testsuite/testcase_0207/testcase_0207_properties.json ./system_test/replication_testsuite/testcase_0208/testcase_0208_properties.json ./system_test/replication_testsuite/testcase_0209/testcase_0209_properties.json ./system_test/replication_testsuite/testcase_0251/testcase_0251_properties.json ./system_test/replication_testsuite/testcase_0252/testcase_0252_properties.json ./system_test/replication_testsuite/testcase_0253/testcase_0253_properties.json ./system_test/replication_testsuite/testcase_0254/testcase_0254_properties.json ./system_test/replication_testsuite/testcase_0255/testcase_0255_properties.json ./system_test/replication_testsuite/testcase_0256/testcase_0256_properties.json ./system_test/replication_testsuite/testcase_0257/testcase_0257_properties.json ./system_test/replication_testsuite/testcase_0258/testcase_0258_properties.json ./system_test/replication_testsuite/testcase_0259/testcase_0259_properties.json ./system_test/replication_testsuite/testcase_0301/testcase_0301_properties.json ./system_test/replication_testsuite/testcase_0302/testcase_0302_properties.json ./system_test/replication_testsuite/testcase_0303/testcase_0303_properties.json ./system_test/replication_testsuite/testcase_0304/testcase_0304_properties.json ./system_test/replication_testsuite/testcase_0305/testcase_0305_properties.json ./system_test/replication_testsuite/testcase_0306/testcase_0306_properties.json 
./system_test/replication_testsuite/testcase_0307/testcase_0307_properties.json ./system_test/replication_testsuite/testcase_0308/testcase_0308_properties.json ./system_test/replication_testsuite/testcase_0309/testcase_0309_properties.json ./system_test/replication_testsuite/testcase_1/cluster_config.json ./system_test/replication_testsuite/testcase_1/testcase_1_properties.json ./system_test/replication_testsuite/testcase_4001/cluster_config.json ./system_test/replication_testsuite/testcase_4001/testcase_4001_properties.json ./system_test/replication_testsuite/testcase_4002/cluster_config.json ./system_test/replication_testsuite/testcase_4002/testcase_4002_properties.json ./system_test/replication_testsuite/testcase_4003/cluster_config.json ./system_test/replication_testsuite/testcase_4003/testcase_4003_properties.json ./system_test/replication_testsuite/testcase_4004/cluster_config.json ./system_test/replication_testsuite/testcase_4004/testcase_4004_properties.json ./system_test/replication_testsuite/testcase_4005/cluster_config.json ./system_test/replication_testsuite/testcase_4005/testcase_4005_properties.json ./system_test/replication_testsuite/testcase_4006/cluster_config.json ./system_test/replication_testsuite/testcase_4006/testcase_4006_properties.json ./system_test/replication_testsuite/testcase_4007/cluster_config.json ./system_test/replication_testsuite/testcase_4007/testcase_4007_properties.json ./system_test/replication_testsuite/testcase_4008/cluster_config.json ./system_test/replication_testsuite/testcase_4008/testcase_4008_properties.json ./system_test/replication_testsuite/testcase_4011/cluster_config.json ./system_test/replication_testsuite/testcase_4011/testcase_4011_properties.json ./system_test/replication_testsuite/testcase_4012/cluster_config.json ./system_test/replication_testsuite/testcase_4012/testcase_4012_properties.json ./system_test/replication_testsuite/testcase_4013/cluster_config.json 
./system_test/replication_testsuite/testcase_4013/testcase_4013_properties.json ./system_test/replication_testsuite/testcase_4014/cluster_config.json ./system_test/replication_testsuite/testcase_4014/testcase_4014_properties.json ./system_test/replication_testsuite/testcase_4015/cluster_config.json ./system_test/replication_testsuite/testcase_4015/testcase_4015_properties.json ./system_test/replication_testsuite/testcase_4016/cluster_config.json ./system_test/replication_testsuite/testcase_4016/testcase_4016_properties.json ./system_test/replication_testsuite/testcase_4017/cluster_config.json ./system_test/replication_testsuite/testcase_4017/testcase_4017_properties.json ./system_test/replication_testsuite/testcase_4018/cluster_config.json ./system_test/replication_testsuite/testcase_4018/testcase_4018_properties.json ./system_test/replication_testsuite/testcase_9051/cluster_config.json ./system_test/replication_testsuite/testcase_9051/testcase_9051_properties.json ./system_test/utils/__init__.py ******************************* Archives: + ./contrib/hadoop-consumer/lib/piggybank.jar + ./contrib/hadoop-producer/lib/piggybank.jar + ./lib/apache-rat-0.8.jar + ./lib/sbt-launch.jar + ./system_test/migration_tool_testsuite/0.7/lib/kafka-0.7.0.jar + ./system_test/migration_tool_testsuite/0.7/lib/kafka-perf-0.7.0.jar + ./system_test/migration_tool_testsuite/0.7/lib/zkclient-0.1.jar ***************************************************** Files with Apache License headers will be marked AL Binary files (which do not require AL headers) will be marked B Compressed archives will be marked A Notices, licenses etc will be marked N !????? ./.gitignore !????? ./.rat-excludes N ./DISCLAIMER N ./LICENSE N ./NOTICE !????? 
./README.md AL ./sbt AL ./sbt.bat AL ./bin/kafka-check-reassignment-status.sh AL ./bin/kafka-console-consumer-log4j.properties AL ./bin/kafka-console-consumer.sh AL ./bin/kafka-console-producer.sh AL ./bin/kafka-consumer-perf-test.sh AL ./bin/kafka-create-topic.sh AL ./bin/kafka-delete-topic.sh AL ./bin/kafka-list-topic.sh AL ./bin/kafka-preferred-replica-election.sh AL ./bin/kafka-producer-perf-test.sh AL ./bin/kafka-reassign-partitions.sh AL ./bin/kafka-replay-log-producer.sh AL ./bin/kafka-run-class.sh AL ./bin/kafka-server-start.sh AL ./bin/kafka-server-stop.sh AL ./bin/kafka-simple-consumer-perf-test.sh AL ./bin/kafka-simple-consumer-shell.sh AL ./bin/run-rat.sh AL ./bin/zookeeper-server-start.sh AL ./bin/zookeeper-server-stop.sh AL ./bin/zookeeper-shell.sh AL ./bin/windows/kafka-console-consumer.bat AL ./bin/windows/kafka-console-producer.bat AL ./bin/windows/kafka-run-class.bat AL ./bin/windows/kafka-server-start.bat AL ./bin/windows/kafka-server-stop.bat AL ./bin/windows/zookeeper-server-start.bat AL ./bin/windows/zookeeper-server-stop.bat AL ./config/consumer.properties AL ./config/log4j.properties AL ./config/producer.properties AL ./config/server.properties AL ./config/zookeeper.properties N ./contrib/hadoop-consumer/LICENSE N ./contrib/hadoop-consumer/README !????? 
./contrib/hadoop-consumer/build.sbt AL ./contrib/hadoop-consumer/copy-jars.sh AL ./contrib/hadoop-consumer/hadoop-setup.sh AL ./contrib/hadoop-consumer/run-class.sh A ./contrib/hadoop-consumer/lib/piggybank.jar AL ./contrib/hadoop-consumer/src/main/java/kafka/etl/KafkaETLContext.java AL ./contrib/hadoop-consumer/src/main/java/kafka/etl/KafkaETLInputFormat.java AL ./contrib/hadoop-consumer/src/main/java/kafka/etl/KafkaETLJob.java AL ./contrib/hadoop-consumer/src/main/java/kafka/etl/KafkaETLKey.java AL ./contrib/hadoop-consumer/src/main/java/kafka/etl/KafkaETLRecordReader.java AL ./contrib/hadoop-consumer/src/main/java/kafka/etl/KafkaETLRequest.java AL ./contrib/hadoop-consumer/src/main/java/kafka/etl/KafkaETLUtils.java AL ./contrib/hadoop-consumer/src/main/java/kafka/etl/Props.java AL ./contrib/hadoop-consumer/src/main/java/kafka/etl/UndefinedPropertyException.java AL ./contrib/hadoop-consumer/src/main/java/kafka/etl/impl/DataGenerator.java AL ./contrib/hadoop-consumer/src/main/java/kafka/etl/impl/SimpleKafkaETLJob.java AL ./contrib/hadoop-consumer/src/main/java/kafka/etl/impl/SimpleKafkaETLMapper.java AL ./contrib/hadoop-consumer/test/test.properties N ./contrib/hadoop-producer/LICENSE !????? ./contrib/hadoop-producer/README.md !????? ./contrib/hadoop-producer/build.sbt A ./contrib/hadoop-producer/lib/piggybank.jar AL ./contrib/hadoop-producer/src/main/java/kafka/bridge/examples/TextPublisher.java AL ./contrib/hadoop-producer/src/main/java/kafka/bridge/hadoop/KafkaOutputFormat.java AL ./contrib/hadoop-producer/src/main/java/kafka/bridge/hadoop/KafkaRecordWriter.java AL ./contrib/hadoop-producer/src/main/java/kafka/bridge/pig/AvroKafkaStorage.java !????? ./core/build.sbt AL ./core/src/main/scala/kafka/Kafka.scala AL ./core/src/main/scala/kafka/admin/AdminUtils.scala !????? 
./core/src/main/scala/kafka/admin/CheckReassignmentStatus.scala AL ./core/src/main/scala/kafka/admin/CreateTopicCommand.scala AL ./core/src/main/scala/kafka/admin/DeleteTopicCommand.scala AL ./core/src/main/scala/kafka/admin/ListTopicCommand.scala AL ./core/src/main/scala/kafka/admin/PreferredReplicaLeaderElectionCommand.scala AL ./core/src/main/scala/kafka/admin/ReassignPartitionsCommand.scala AL ./core/src/main/scala/kafka/admin/ShutdownBroker.scala !????? ./core/src/main/scala/kafka/api/ApiUtils.scala AL ./core/src/main/scala/kafka/api/ControlledShutdownRequest.scala AL ./core/src/main/scala/kafka/api/ControlledShutdownResponse.scala AL ./core/src/main/scala/kafka/api/FetchRequest.scala AL ./core/src/main/scala/kafka/api/FetchResponse.scala AL ./core/src/main/scala/kafka/api/LeaderAndIsrRequest.scala AL ./core/src/main/scala/kafka/api/LeaderAndIsrResponse.scala AL ./core/src/main/scala/kafka/api/OffsetRequest.scala AL ./core/src/main/scala/kafka/api/OffsetResponse.scala AL ./core/src/main/scala/kafka/api/ProducerRequest.scala AL ./core/src/main/scala/kafka/api/ProducerResponse.scala AL ./core/src/main/scala/kafka/api/RequestKeys.scala AL ./core/src/main/scala/kafka/api/RequestOrResponse.scala AL ./core/src/main/scala/kafka/api/StopReplicaRequest.scala AL ./core/src/main/scala/kafka/api/StopReplicaResponse.scala AL ./core/src/main/scala/kafka/api/TopicMetadata.scala AL ./core/src/main/scala/kafka/api/TopicMetadataRequest.scala AL ./core/src/main/scala/kafka/api/TopicMetadataResponse.scala !????? ./core/src/main/scala/kafka/api/UpdateMetadataRequest.scala AL ./core/src/main/scala/kafka/api/UpdateMetadataResponse.scala !????? 
./core/src/main/scala/kafka/client/ClientUtils.scala AL ./core/src/main/scala/kafka/cluster/Broker.scala AL ./core/src/main/scala/kafka/cluster/Cluster.scala AL ./core/src/main/scala/kafka/cluster/Partition.scala AL ./core/src/main/scala/kafka/cluster/Replica.scala AL ./core/src/main/scala/kafka/common/AdminCommandFailedException.scala AL ./core/src/main/scala/kafka/common/BrokerNotAvailableException.scala !????? ./core/src/main/scala/kafka/common/BrokerNotExistException.scala AL ./core/src/main/scala/kafka/common/ClientIdAndBroker.scala AL ./core/src/main/scala/kafka/common/ClientIdAndTopic.scala AL ./core/src/main/scala/kafka/common/Config.scala AL ./core/src/main/scala/kafka/common/ConsumerReblanceFailedException.scala AL ./core/src/main/scala/kafka/common/ControllerMovedException.scala AL ./core/src/main/scala/kafka/common/ErrorMapping.scala AL ./core/src/main/scala/kafka/common/FailedToSendMessageException.scala AL ./core/src/main/scala/kafka/common/InvalidConfigException.scala AL ./core/src/main/scala/kafka/common/InvalidMessageSizeException.scala AL ./core/src/main/scala/kafka/common/InvalidOffsetException.scala AL ./core/src/main/scala/kafka/common/InvalidTopicException.scala AL ./core/src/main/scala/kafka/common/KafkaException.scala AL ./core/src/main/scala/kafka/common/KafkaStorageException.scala AL ./core/src/main/scala/kafka/common/KafkaZookeperClient.scala AL ./core/src/main/scala/kafka/common/LeaderElectionNotNeededException.scala AL ./core/src/main/scala/kafka/common/LeaderNotAvailableException.scala AL ./core/src/main/scala/kafka/common/MessageSizeTooLargeException.scala AL ./core/src/main/scala/kafka/common/NoBrokersForPartitionException.scala AL ./core/src/main/scala/kafka/common/NoEpochForPartitionException.scala AL ./core/src/main/scala/kafka/common/NoReplicaOnlineException.scala AL ./core/src/main/scala/kafka/common/NotLeaderForPartitionException.scala AL ./core/src/main/scala/kafka/common/OffsetOutOfRangeException.scala AL 
./core/src/main/scala/kafka/common/QueueFullException.scala AL ./core/src/main/scala/kafka/common/ReplicaNotAvailableException.scala AL ./core/src/main/scala/kafka/common/RequestTimedOutException.scala AL ./core/src/main/scala/kafka/common/StateChangeFailedException.scala AL ./core/src/main/scala/kafka/common/Topic.scala AL ./core/src/main/scala/kafka/common/TopicAndPartition.scala AL ./core/src/main/scala/kafka/common/TopicExistsException.scala AL ./core/src/main/scala/kafka/common/UnavailableProducerException.scala AL ./core/src/main/scala/kafka/common/UnknownCodecException.scala AL ./core/src/main/scala/kafka/common/UnknownException.scala AL ./core/src/main/scala/kafka/common/UnknownMagicByteException.scala AL ./core/src/main/scala/kafka/common/UnknownTopicOrPartitionException.scala AL ./core/src/main/scala/kafka/consumer/ConsoleConsumer.scala AL ./core/src/main/scala/kafka/consumer/ConsumerConfig.scala AL ./core/src/main/scala/kafka/consumer/ConsumerConnector.scala AL ./core/src/main/scala/kafka/consumer/ConsumerFetcherManager.scala AL ./core/src/main/scala/kafka/consumer/ConsumerFetcherThread.scala AL ./core/src/main/scala/kafka/consumer/ConsumerIterator.scala !????? ./core/src/main/scala/kafka/consumer/ConsumerTopicStat.scala AL ./core/src/main/scala/kafka/consumer/ConsumerTopicStats.scala AL ./core/src/main/scala/kafka/consumer/FetchRequestAndResponseStats.scala AL ./core/src/main/scala/kafka/consumer/FetchedDataChunk.scala AL ./core/src/main/scala/kafka/consumer/KafkaStream.scala AL ./core/src/main/scala/kafka/consumer/PartitionTopicInfo.scala AL ./core/src/main/scala/kafka/consumer/SimpleConsumer.scala AL ./core/src/main/scala/kafka/consumer/TopicCount.scala AL ./core/src/main/scala/kafka/consumer/TopicEventHandler.scala AL ./core/src/main/scala/kafka/consumer/TopicFilter.scala AL ./core/src/main/scala/kafka/consumer/ZookeeperConsumerConnector.scala AL ./core/src/main/scala/kafka/consumer/ZookeeperTopicEventWatcher.scala !????? 
./core/src/main/scala/kafka/consumer/package.html AL ./core/src/main/scala/kafka/controller/ControllerChannelManager.scala AL ./core/src/main/scala/kafka/controller/KafkaController.scala AL ./core/src/main/scala/kafka/controller/PartitionLeaderSelector.scala AL ./core/src/main/scala/kafka/controller/PartitionStateMachine.scala AL ./core/src/main/scala/kafka/controller/ReplicaStateMachine.scala AL ./core/src/main/scala/kafka/javaapi/FetchRequest.scala AL ./core/src/main/scala/kafka/javaapi/FetchResponse.scala AL ./core/src/main/scala/kafka/javaapi/Implicits.scala AL ./core/src/main/scala/kafka/javaapi/OffsetRequest.scala AL ./core/src/main/scala/kafka/javaapi/OffsetResponse.scala AL ./core/src/main/scala/kafka/javaapi/TopicMetadata.scala AL ./core/src/main/scala/kafka/javaapi/TopicMetadataRequest.scala AL ./core/src/main/scala/kafka/javaapi/TopicMetadataResponse.scala AL ./core/src/main/scala/kafka/javaapi/consumer/ConsumerConnector.java AL ./core/src/main/scala/kafka/javaapi/consumer/SimpleConsumer.scala AL ./core/src/main/scala/kafka/javaapi/consumer/ZookeeperConsumerConnector.scala AL ./core/src/main/scala/kafka/javaapi/message/ByteBufferMessageSet.scala AL ./core/src/main/scala/kafka/javaapi/message/MessageSet.scala AL ./core/src/main/scala/kafka/javaapi/producer/Producer.scala AL ./core/src/main/scala/kafka/log/FileMessageSet.scala AL ./core/src/main/scala/kafka/log/Log.scala AL ./core/src/main/scala/kafka/log/LogManager.scala !????? ./core/src/main/scala/kafka/log/LogSegment.scala AL ./core/src/main/scala/kafka/log/OffsetIndex.scala AL ./core/src/main/scala/kafka/log/OffsetPosition.scala AL ./core/src/main/scala/kafka/log/SegmentList.scala !????? 
./core/src/main/scala/kafka/log/package.html AL ./core/src/main/scala/kafka/message/ByteBufferBackedInputStream.scala AL ./core/src/main/scala/kafka/message/ByteBufferMessageSet.scala AL ./core/src/main/scala/kafka/message/CompressionCodec.scala AL ./core/src/main/scala/kafka/message/CompressionFactory.scala AL ./core/src/main/scala/kafka/message/InvalidMessageException.scala AL ./core/src/main/scala/kafka/message/Message.scala AL ./core/src/main/scala/kafka/message/MessageAndMetadata.scala AL ./core/src/main/scala/kafka/message/MessageAndOffset.scala AL ./core/src/main/scala/kafka/message/MessageLengthException.scala AL ./core/src/main/scala/kafka/message/MessageSet.scala !????? ./core/src/main/scala/kafka/message/package.html AL ./core/src/main/scala/kafka/metrics/KafkaCSVMetricsReporter.scala AL ./core/src/main/scala/kafka/metrics/KafkaMetricsConfig.scala AL ./core/src/main/scala/kafka/metrics/KafkaMetricsGroup.scala AL ./core/src/main/scala/kafka/metrics/KafkaMetricsReporter.scala AL ./core/src/main/scala/kafka/metrics/KafkaTimer.scala AL ./core/src/main/scala/kafka/network/BlockingChannel.scala AL ./core/src/main/scala/kafka/network/BoundedByteBufferReceive.scala AL ./core/src/main/scala/kafka/network/BoundedByteBufferSend.scala AL ./core/src/main/scala/kafka/network/ByteBufferSend.scala AL ./core/src/main/scala/kafka/network/ConnectionConfig.scala AL ./core/src/main/scala/kafka/network/Handler.scala AL ./core/src/main/scala/kafka/network/InvalidRequestException.scala AL ./core/src/main/scala/kafka/network/RequestChannel.scala AL ./core/src/main/scala/kafka/network/SocketServer.scala AL ./core/src/main/scala/kafka/network/Transmission.scala !????? 
./core/src/main/scala/kafka/network/package.html AL ./core/src/main/scala/kafka/producer/BrokerPartitionInfo.scala AL ./core/src/main/scala/kafka/producer/ConsoleProducer.scala AL ./core/src/main/scala/kafka/producer/DefaultPartitioner.scala AL ./core/src/main/scala/kafka/producer/KafkaLog4jAppender.scala AL ./core/src/main/scala/kafka/producer/KeyedMessage.scala AL ./core/src/main/scala/kafka/producer/Partitioner.scala AL ./core/src/main/scala/kafka/producer/Producer.scala AL ./core/src/main/scala/kafka/producer/ProducerClosedException.scala AL ./core/src/main/scala/kafka/producer/ProducerConfig.scala AL ./core/src/main/scala/kafka/producer/ProducerPool.scala AL ./core/src/main/scala/kafka/producer/ProducerRequestStats.scala AL ./core/src/main/scala/kafka/producer/ProducerStats.scala AL ./core/src/main/scala/kafka/producer/ProducerTopicStats.scala AL ./core/src/main/scala/kafka/producer/SyncProducer.scala AL ./core/src/main/scala/kafka/producer/SyncProducerConfig.scala AL ./core/src/main/scala/kafka/producer/async/AsyncProducerConfig.scala !????? 
./core/src/main/scala/kafka/producer/async/AsyncProducerStats.scala AL ./core/src/main/scala/kafka/producer/async/DefaultEventHandler.scala AL ./core/src/main/scala/kafka/producer/async/EventHandler.scala AL ./core/src/main/scala/kafka/producer/async/IllegalQueueStateException.scala AL ./core/src/main/scala/kafka/producer/async/MissingConfigException.scala AL ./core/src/main/scala/kafka/producer/async/ProducerSendThread.scala AL ./core/src/main/scala/kafka/serializer/Decoder.scala AL ./core/src/main/scala/kafka/serializer/Encoder.scala AL ./core/src/main/scala/kafka/server/AbstractFetcherManager.scala AL ./core/src/main/scala/kafka/server/AbstractFetcherThread.scala AL ./core/src/main/scala/kafka/server/HighwaterMarkCheckpoint.scala AL ./core/src/main/scala/kafka/server/KafkaApis.scala AL ./core/src/main/scala/kafka/server/KafkaConfig.scala AL ./core/src/main/scala/kafka/server/KafkaRequestHandler.scala AL ./core/src/main/scala/kafka/server/KafkaServer.scala AL ./core/src/main/scala/kafka/server/KafkaServerStartable.scala AL ./core/src/main/scala/kafka/server/KafkaZooKeeper.scala AL ./core/src/main/scala/kafka/server/LeaderElector.scala AL ./core/src/main/scala/kafka/server/MessageSetSend.scala AL ./core/src/main/scala/kafka/server/ReplicaFetcherManager.scala AL ./core/src/main/scala/kafka/server/ReplicaFetcherThread.scala AL ./core/src/main/scala/kafka/server/ReplicaManager.scala AL ./core/src/main/scala/kafka/server/RequestPurgatory.scala AL ./core/src/main/scala/kafka/server/ZookeeperLeaderElector.scala !????? 
./core/src/main/scala/kafka/server/package.html AL ./core/src/main/scala/kafka/tools/ConsumerOffsetChecker.scala AL ./core/src/main/scala/kafka/tools/DumpLogSegments.scala AL ./core/src/main/scala/kafka/tools/ExportZkOffsets.scala AL ./core/src/main/scala/kafka/tools/GetOffsetShell.scala AL ./core/src/main/scala/kafka/tools/ImportZkOffsets.scala AL ./core/src/main/scala/kafka/tools/JmxTool.scala AL ./core/src/main/scala/kafka/tools/KafkaMigrationTool.java AL ./core/src/main/scala/kafka/tools/MirrorMaker.scala AL ./core/src/main/scala/kafka/tools/ReplayLogProducer.scala AL ./core/src/main/scala/kafka/tools/SimpleConsumerShell.scala AL ./core/src/main/scala/kafka/tools/StateChangeLogMerger.scala AL ./core/src/main/scala/kafka/tools/UpdateOffsetsInZK.scala AL ./core/src/main/scala/kafka/tools/VerifyConsumerRebalance.scala AL ./core/src/main/scala/kafka/utils/Annotations.scala !????? ./core/src/main/scala/kafka/utils/CommandLineUtils.scala AL ./core/src/main/scala/kafka/utils/DelayedItem.scala !????? ./core/src/main/scala/kafka/utils/FileLock.scala AL ./core/src/main/scala/kafka/utils/IteratorTemplate.scala !????? ./core/src/main/scala/kafka/utils/Json.scala AL ./core/src/main/scala/kafka/utils/KafkaScheduler.scala AL ./core/src/main/scala/kafka/utils/Log4jController.scala AL ./core/src/main/scala/kafka/utils/Logging.scala AL ./core/src/main/scala/kafka/utils/MockTime.scala AL ./core/src/main/scala/kafka/utils/Mx4jLoader.scala AL ./core/src/main/scala/kafka/utils/Pool.scala AL ./core/src/main/scala/kafka/utils/Range.scala AL ./core/src/main/scala/kafka/utils/ShutdownableThread.scala AL ./core/src/main/scala/kafka/utils/Throttler.scala AL ./core/src/main/scala/kafka/utils/Time.scala !????? ./core/src/main/scala/kafka/utils/Topic.scala AL ./core/src/main/scala/kafka/utils/Utils.scala AL ./core/src/main/scala/kafka/utils/VerifiableProperties.scala AL ./core/src/main/scala/kafka/utils/ZkUtils.scala !????? 
./core/src/main/scala/kafka/utils/package.html AL ./core/src/test/resources/log4j.properties AL ./core/src/test/scala/other/kafka.log4j.properties AL ./core/src/test/scala/other/kafka/DeleteZKPath.scala AL ./core/src/test/scala/other/kafka/StressTestLog.scala AL ./core/src/test/scala/other/kafka/TestEndToEndLatency.scala AL ./core/src/test/scala/other/kafka/TestKafkaAppender.scala AL ./core/src/test/scala/other/kafka/TestLinearWriteSpeed.scala AL ./core/src/test/scala/other/kafka/TestLogPerformance.scala AL ./core/src/test/scala/other/kafka/TestTruncate.scala AL ./core/src/test/scala/other/kafka/TestZKConsumerOffsets.scala AL ./core/src/test/scala/unit/kafka/admin/AdminTest.scala AL ./core/src/test/scala/unit/kafka/api/ApiUtilsTest.scala AL ./core/src/test/scala/unit/kafka/api/RequestResponseSerializationTest.scala AL ./core/src/test/scala/unit/kafka/common/ConfigTest.scala AL ./core/src/test/scala/unit/kafka/common/TopicTest.scala AL ./core/src/test/scala/unit/kafka/consumer/ConsumerIteratorTest.scala AL ./core/src/test/scala/unit/kafka/consumer/TopicFilterTest.scala AL ./core/src/test/scala/unit/kafka/consumer/ZookeeperConsumerConnectorTest.scala AL ./core/src/test/scala/unit/kafka/integration/AutoOffsetResetTest.scala AL ./core/src/test/scala/unit/kafka/integration/FetcherTest.scala AL ./core/src/test/scala/unit/kafka/integration/KafkaServerTestHarness.scala AL ./core/src/test/scala/unit/kafka/integration/LazyInitProducerTest.scala AL ./core/src/test/scala/unit/kafka/integration/PrimitiveApiTest.scala AL ./core/src/test/scala/unit/kafka/integration/ProducerConsumerTestHarness.scala AL ./core/src/test/scala/unit/kafka/integration/RollingBounceTest.scala AL ./core/src/test/scala/unit/kafka/integration/TopicMetadataTest.scala AL ./core/src/test/scala/unit/kafka/javaapi/consumer/ZookeeperConsumerConnectorTest.scala AL ./core/src/test/scala/unit/kafka/javaapi/message/BaseMessageSetTestCases.scala AL 
./core/src/test/scala/unit/kafka/javaapi/message/ByteBufferMessageSetTest.scala AL ./core/src/test/scala/unit/kafka/log/FileMessageSetTest.scala AL ./core/src/test/scala/unit/kafka/log/LogManagerTest.scala AL ./core/src/test/scala/unit/kafka/log/LogOffsetTest.scala !????? ./core/src/test/scala/unit/kafka/log/LogSegmentTest.scala AL ./core/src/test/scala/unit/kafka/log/LogTest.scala AL ./core/src/test/scala/unit/kafka/log/OffsetIndexTest.scala AL ./core/src/test/scala/unit/kafka/log/SegmentListTest.scala AL ./core/src/test/scala/unit/kafka/log4j/KafkaLog4jAppenderTest.scala AL ./core/src/test/scala/unit/kafka/message/BaseMessageSetTestCases.scala AL ./core/src/test/scala/unit/kafka/message/ByteBufferMessageSetTest.scala AL ./core/src/test/scala/unit/kafka/message/MessageCompressionTest.scala AL ./core/src/test/scala/unit/kafka/message/MessageTest.scala AL ./core/src/test/scala/unit/kafka/metrics/KafkaTimerTest.scala AL ./core/src/test/scala/unit/kafka/network/SocketServerTest.scala AL ./core/src/test/scala/unit/kafka/producer/AsyncProducerTest.scala AL ./core/src/test/scala/unit/kafka/producer/ProducerTest.scala AL ./core/src/test/scala/unit/kafka/producer/SyncProducerTest.scala AL ./core/src/test/scala/unit/kafka/server/HighwatermarkPersistenceTest.scala AL ./core/src/test/scala/unit/kafka/server/ISRExpirationTest.scala AL ./core/src/test/scala/unit/kafka/server/LeaderElectionTest.scala AL ./core/src/test/scala/unit/kafka/server/LogRecoveryTest.scala AL ./core/src/test/scala/unit/kafka/server/ReplicaFetchTest.scala AL ./core/src/test/scala/unit/kafka/server/RequestPurgatoryTest.scala AL ./core/src/test/scala/unit/kafka/server/ServerShutdownTest.scala AL ./core/src/test/scala/unit/kafka/server/SimpleFetchTest.scala AL ./core/src/test/scala/unit/kafka/utils/TestUtils.scala AL ./core/src/test/scala/unit/kafka/utils/UtilsTest.scala AL ./core/src/test/scala/unit/kafka/zk/EmbeddedZookeeper.scala AL ./core/src/test/scala/unit/kafka/zk/ZKEphemeralTest.scala AL 
./core/src/test/scala/unit/kafka/zk/ZooKeeperTestHarness.scala N ./examples/README !????? ./examples/build.sbt AL ./examples/bin/java-producer-consumer-demo.sh AL ./examples/bin/java-simple-consumer-demo.sh AL ./examples/src/main/java/kafka/examples/Consumer.java AL ./examples/src/main/java/kafka/examples/KafkaConsumerProducerDemo.java AL ./examples/src/main/java/kafka/examples/KafkaProperties.java AL ./examples/src/main/java/kafka/examples/Producer.java AL ./examples/src/main/java/kafka/examples/SimpleConsumerDemo.java A ./lib/apache-rat-0.8.jar A ./lib/sbt-launch.jar !????? ./perf/build.sbt AL ./perf/config/log4j.properties AL ./perf/src/main/scala/kafka/perf/ConsumerPerformance.scala AL ./perf/src/main/scala/kafka/perf/PerfConfig.scala AL ./perf/src/main/scala/kafka/perf/ProducerPerformance.scala AL ./perf/src/main/scala/kafka/perf/SimpleConsumerPerformance.scala AL ./project/Build.scala AL ./project/build.properties !????? ./project/plugins.sbt AL ./project/build/KafkaProject.scala N ./system_test/README.txt !????? ./system_test/__init__.py !????? ./system_test/cluster_config.json !????? ./system_test/logging.conf !????? ./system_test/metrics.json !????? ./system_test/run_sanity.sh AL ./system_test/system_test_env.py AL ./system_test/system_test_runner.py !????? ./system_test/testcase_to_run.json !????? ./system_test/testcase_to_run_all.json !????? ./system_test/testcase_to_run_sanity.json !????? 
./system_test/testcase_to_skip.json N ./system_test/broker_failure/README AL ./system_test/broker_failure/bin/kafka-run-class.sh AL ./system_test/broker_failure/bin/run-test.sh AL ./system_test/broker_failure/config/log4j.properties AL ./system_test/broker_failure/config/mirror_producer.properties AL ./system_test/broker_failure/config/mirror_producer1.properties AL ./system_test/broker_failure/config/mirror_producer2.properties AL ./system_test/broker_failure/config/mirror_producer3.properties AL ./system_test/broker_failure/config/server_source1.properties AL ./system_test/broker_failure/config/server_source2.properties AL ./system_test/broker_failure/config/server_source3.properties AL ./system_test/broker_failure/config/server_source4.properties AL ./system_test/broker_failure/config/server_target1.properties AL ./system_test/broker_failure/config/server_target2.properties AL ./system_test/broker_failure/config/server_target3.properties AL ./system_test/broker_failure/config/whitelisttest.consumer.properties AL ./system_test/broker_failure/config/zookeeper_source.properties AL ./system_test/broker_failure/config/zookeeper_target.properties AL ./system_test/common/util.sh !????? ./system_test/migration_tool_testsuite/__init__.py !????? 
./system_test/migration_tool_testsuite/cluster_config.json AL ./system_test/migration_tool_testsuite/migration_tool_test.py AL ./system_test/migration_tool_testsuite/0.7/bin/kafka-run-class.sh AL ./system_test/migration_tool_testsuite/0.7/bin/zookeeper-server-start.sh A ./system_test/migration_tool_testsuite/0.7/lib/kafka-0.7.0.jar A ./system_test/migration_tool_testsuite/0.7/lib/kafka-perf-0.7.0.jar A ./system_test/migration_tool_testsuite/0.7/lib/zkclient-0.1.jar AL ./system_test/migration_tool_testsuite/config/migration_consumer.properties AL ./system_test/migration_tool_testsuite/config/migration_producer.properties AL ./system_test/migration_tool_testsuite/config/server.properties AL ./system_test/migration_tool_testsuite/config/zookeeper.properties !????? ./system_test/migration_tool_testsuite/testcase_9001/testcase_9001_properties.json !????? ./system_test/migration_tool_testsuite/testcase_9003/cluster_config.json !????? ./system_test/migration_tool_testsuite/testcase_9003/testcase_9003_properties.json !????? ./system_test/migration_tool_testsuite/testcase_9004/cluster_config.json !????? ./system_test/migration_tool_testsuite/testcase_9004/testcase_9004_properties.json !????? ./system_test/migration_tool_testsuite/testcase_9005/cluster_config.json !????? ./system_test/migration_tool_testsuite/testcase_9005/testcase_9005_properties.json !????? ./system_test/migration_tool_testsuite/testcase_9006/cluster_config.json !????? ./system_test/migration_tool_testsuite/testcase_9006/testcase_9006_properties.json N ./system_test/mirror_maker/README !????? 
./system_test/mirror_maker/bin/expected.out AL ./system_test/mirror_maker/bin/run-test.sh AL ./system_test/mirror_maker/config/blacklisttest.consumer.properties AL ./system_test/mirror_maker/config/mirror_producer.properties AL ./system_test/mirror_maker/config/server_source_1_1.properties AL ./system_test/mirror_maker/config/server_source_1_2.properties AL ./system_test/mirror_maker/config/server_source_2_1.properties AL ./system_test/mirror_maker/config/server_source_2_2.properties AL ./system_test/mirror_maker/config/server_target_1_1.properties AL ./system_test/mirror_maker/config/server_target_1_2.properties AL ./system_test/mirror_maker/config/whitelisttest_1.consumer.properties AL ./system_test/mirror_maker/config/whitelisttest_2.consumer.properties AL ./system_test/mirror_maker/config/zookeeper_source_1.properties AL ./system_test/mirror_maker/config/zookeeper_source_2.properties AL ./system_test/mirror_maker/config/zookeeper_target.properties !????? ./system_test/mirror_maker_testsuite/__init__.py !????? ./system_test/mirror_maker_testsuite/cluster_config.json AL ./system_test/mirror_maker_testsuite/mirror_maker_test.py !????? ./system_test/mirror_maker_testsuite/config/console_consumer.properties !????? ./system_test/mirror_maker_testsuite/config/consumer.properties !????? ./system_test/mirror_maker_testsuite/config/log4j.properties !????? ./system_test/mirror_maker_testsuite/config/mirror_consumer.properties !????? ./system_test/mirror_maker_testsuite/config/mirror_producer.properties !????? ./system_test/mirror_maker_testsuite/config/producer.properties !????? ./system_test/mirror_maker_testsuite/config/producer_performance.properties AL ./system_test/mirror_maker_testsuite/config/server.properties AL ./system_test/mirror_maker_testsuite/config/zookeeper.properties !????? ./system_test/mirror_maker_testsuite/testcase_5001/testcase_5001_properties.json !????? ./system_test/mirror_maker_testsuite/testcase_5002/testcase_5002_properties.json !????? 
./system_test/mirror_maker_testsuite/testcase_5003/cluster_config.json !????? ./system_test/mirror_maker_testsuite/testcase_5003/testcase_5003_properties.json !????? ./system_test/mirror_maker_testsuite/testcase_5004/cluster_config.json !????? ./system_test/mirror_maker_testsuite/testcase_5004/testcase_5004_properties.json !????? ./system_test/mirror_maker_testsuite/testcase_5005/cluster_config.json !????? ./system_test/mirror_maker_testsuite/testcase_5005/testcase_5005_properties.json !????? ./system_test/mirror_maker_testsuite/testcase_5006/cluster_config.json !????? ./system_test/mirror_maker_testsuite/testcase_5006/testcase_5006_properties.json N ./system_test/producer_perf/README !????? ./system_test/producer_perf/bin/expected.out AL ./system_test/producer_perf/bin/run-compression-test.sh AL ./system_test/producer_perf/bin/run-test.sh AL ./system_test/producer_perf/config/server.properties AL ./system_test/producer_perf/config/zookeeper.properties !????? ./system_test/replication_testsuite/__init__.py AL ./system_test/replication_testsuite/replica_basic_test.py !????? ./system_test/replication_testsuite/config/console_consumer.properties !????? ./system_test/replication_testsuite/config/consumer.properties !????? ./system_test/replication_testsuite/config/log4j.properties !????? ./system_test/replication_testsuite/config/producer.properties !????? ./system_test/replication_testsuite/config/producer_performance.properties AL ./system_test/replication_testsuite/config/server.properties AL ./system_test/replication_testsuite/config/zookeeper.properties !????? ./system_test/replication_testsuite/testcase_0001/testcase_0001_properties.json !????? ./system_test/replication_testsuite/testcase_0002/testcase_0002_properties.json !????? ./system_test/replication_testsuite/testcase_0003/testcase_0003_properties.json !????? ./system_test/replication_testsuite/testcase_0004/testcase_0004_properties.json !????? 
./system_test/replication_testsuite/testcase_0005/testcase_0005_properties.json !????? ./system_test/replication_testsuite/testcase_0006/testcase_0006_properties.json !????? ./system_test/replication_testsuite/testcase_0007/testcase_0007_properties.json !????? ./system_test/replication_testsuite/testcase_0008/testcase_0008_properties.json !????? ./system_test/replication_testsuite/testcase_0009/testcase_0009_properties.json !????? ./system_test/replication_testsuite/testcase_0010/testcase_0010_properties.json !????? ./system_test/replication_testsuite/testcase_0011/testcase_0011_properties.json !????? ./system_test/replication_testsuite/testcase_0021/cluster_config.json !????? ./system_test/replication_testsuite/testcase_0021/testcase_0021_properties.json !????? ./system_test/replication_testsuite/testcase_0022/cluster_config.json !????? ./system_test/replication_testsuite/testcase_0022/testcase_0022_properties.json !????? ./system_test/replication_testsuite/testcase_0023/cluster_config.json !????? ./system_test/replication_testsuite/testcase_0023/testcase_0023_properties.json !????? ./system_test/replication_testsuite/testcase_0024/testcase_0024_properties.json !????? ./system_test/replication_testsuite/testcase_0101/testcase_0101_properties.json !????? ./system_test/replication_testsuite/testcase_0102/testcase_0102_properties.json !????? ./system_test/replication_testsuite/testcase_0103/testcase_0103_properties.json !????? ./system_test/replication_testsuite/testcase_0104/testcase_0104_properties.json !????? ./system_test/replication_testsuite/testcase_0105/testcase_0105_properties.json !????? ./system_test/replication_testsuite/testcase_0106/testcase_0106_properties.json !????? ./system_test/replication_testsuite/testcase_0107/testcase_0107_properties.json !????? ./system_test/replication_testsuite/testcase_0108/testcase_0108_properties.json !????? ./system_test/replication_testsuite/testcase_0109/testcase_0109_properties.json !????? 
./system_test/replication_testsuite/testcase_0110/testcase_0110_properties.json !????? ./system_test/replication_testsuite/testcase_0111/testcase_0111_properties.json !????? ./system_test/replication_testsuite/testcase_0112/testcase_0112_properties.json !????? ./system_test/replication_testsuite/testcase_0113/testcase_0113_properties.json !????? ./system_test/replication_testsuite/testcase_0114/testcase_0114_properties.json !????? ./system_test/replication_testsuite/testcase_0115/testcase_0115_properties.json !????? ./system_test/replication_testsuite/testcase_0116/testcase_0116_properties.json !????? ./system_test/replication_testsuite/testcase_0117/testcase_0117_properties.json !????? ./system_test/replication_testsuite/testcase_0118/testcase_0118_properties.json !????? ./system_test/replication_testsuite/testcase_0119/testcase_0119_properties.json !????? ./system_test/replication_testsuite/testcase_0121/cluster_config.json !????? ./system_test/replication_testsuite/testcase_0121/testcase_0121_properties.json !????? ./system_test/replication_testsuite/testcase_0122/cluster_config.json !????? ./system_test/replication_testsuite/testcase_0122/testcase_0122_properties.json !????? ./system_test/replication_testsuite/testcase_0123/cluster_config.json !????? ./system_test/replication_testsuite/testcase_0123/testcase_0123_properties.json !????? ./system_test/replication_testsuite/testcase_0124/cluster_config.json !????? ./system_test/replication_testsuite/testcase_0124/testcase_0124_properties.json !????? ./system_test/replication_testsuite/testcase_0125/cluster_config.json !????? ./system_test/replication_testsuite/testcase_0125/testcase_0125_properties.json !????? ./system_test/replication_testsuite/testcase_0126/cluster_config.json !????? ./system_test/replication_testsuite/testcase_0126/testcase_0126_properties.json !????? ./system_test/replication_testsuite/testcase_0127/cluster_config.json !????? 
./system_test/replication_testsuite/testcase_0127/testcase_0127_properties.json !????? ./system_test/replication_testsuite/testcase_0128/testcase_0128_properties.json !????? ./system_test/replication_testsuite/testcase_0131/cluster_config.json !????? ./system_test/replication_testsuite/testcase_0131/testcase_0131_properties.json !????? ./system_test/replication_testsuite/testcase_0132/cluster_config.json !????? ./system_test/replication_testsuite/testcase_0132/testcase_0132_properties.json !????? ./system_test/replication_testsuite/testcase_0133/cluster_config.json !????? ./system_test/replication_testsuite/testcase_0133/testcase_0133_properties.json !????? ./system_test/replication_testsuite/testcase_0134/testcase_0134_properties.json !????? ./system_test/replication_testsuite/testcase_0151/testcase_0151_properties.json !????? ./system_test/replication_testsuite/testcase_0152/testcase_0152_properties.json !????? ./system_test/replication_testsuite/testcase_0153/testcase_0153_properties.json !????? ./system_test/replication_testsuite/testcase_0154/testcase_0154_properties.json !????? ./system_test/replication_testsuite/testcase_0155/testcase_0155_properties.json !????? ./system_test/replication_testsuite/testcase_0156/testcase_0156_properties.json !????? ./system_test/replication_testsuite/testcase_0157/testcase_0157_properties.json !????? ./system_test/replication_testsuite/testcase_0158/testcase_0158_properties.json !????? ./system_test/replication_testsuite/testcase_0159/testcase_0159_properties.json !????? ./system_test/replication_testsuite/testcase_0201/testcase_0201_properties.json !????? ./system_test/replication_testsuite/testcase_0202/testcase_0202_properties.json !????? ./system_test/replication_testsuite/testcase_0203/testcase_0203_properties.json !????? ./system_test/replication_testsuite/testcase_0204/testcase_0204_properties.json !????? ./system_test/replication_testsuite/testcase_0205/testcase_0205_properties.json !????? 
./system_test/replication_testsuite/testcase_0206/testcase_0206_properties.json !????? ./system_test/replication_testsuite/testcase_0207/testcase_0207_properties.json !????? ./system_test/replication_testsuite/testcase_0208/testcase_0208_properties.json !????? ./system_test/replication_testsuite/testcase_0209/testcase_0209_properties.json !????? ./system_test/replication_testsuite/testcase_0251/testcase_0251_properties.json !????? ./system_test/replication_testsuite/testcase_0252/testcase_0252_properties.json !????? ./system_test/replication_testsuite/testcase_0253/testcase_0253_properties.json !????? ./system_test/replication_testsuite/testcase_0254/testcase_0254_properties.json !????? ./system_test/replication_testsuite/testcase_0255/testcase_0255_properties.json !????? ./system_test/replication_testsuite/testcase_0256/testcase_0256_properties.json !????? ./system_test/replication_testsuite/testcase_0257/testcase_0257_properties.json !????? ./system_test/replication_testsuite/testcase_0258/testcase_0258_properties.json !????? ./system_test/replication_testsuite/testcase_0259/testcase_0259_properties.json !????? ./system_test/replication_testsuite/testcase_0301/testcase_0301_properties.json !????? ./system_test/replication_testsuite/testcase_0302/testcase_0302_properties.json !????? ./system_test/replication_testsuite/testcase_0303/testcase_0303_properties.json !????? ./system_test/replication_testsuite/testcase_0304/testcase_0304_properties.json !????? ./system_test/replication_testsuite/testcase_0305/testcase_0305_properties.json !????? ./system_test/replication_testsuite/testcase_0306/testcase_0306_properties.json !????? ./system_test/replication_testsuite/testcase_0307/testcase_0307_properties.json !????? ./system_test/replication_testsuite/testcase_0308/testcase_0308_properties.json !????? ./system_test/replication_testsuite/testcase_0309/testcase_0309_properties.json !????? ./system_test/replication_testsuite/testcase_1/cluster_config.json !????? 
./system_test/replication_testsuite/testcase_1/testcase_1_properties.json !????? ./system_test/replication_testsuite/testcase_4001/cluster_config.json !????? ./system_test/replication_testsuite/testcase_4001/testcase_4001_properties.json !????? ./system_test/replication_testsuite/testcase_4002/cluster_config.json !????? ./system_test/replication_testsuite/testcase_4002/testcase_4002_properties.json !????? ./system_test/replication_testsuite/testcase_4003/cluster_config.json !????? ./system_test/replication_testsuite/testcase_4003/testcase_4003_properties.json !????? ./system_test/replication_testsuite/testcase_4004/cluster_config.json !????? ./system_test/replication_testsuite/testcase_4004/testcase_4004_properties.json !????? ./system_test/replication_testsuite/testcase_4005/cluster_config.json !????? ./system_test/replication_testsuite/testcase_4005/testcase_4005_properties.json !????? ./system_test/replication_testsuite/testcase_4006/cluster_config.json !????? ./system_test/replication_testsuite/testcase_4006/testcase_4006_properties.json !????? ./system_test/replication_testsuite/testcase_4007/cluster_config.json !????? ./system_test/replication_testsuite/testcase_4007/testcase_4007_properties.json !????? ./system_test/replication_testsuite/testcase_4008/cluster_config.json !????? ./system_test/replication_testsuite/testcase_4008/testcase_4008_properties.json !????? ./system_test/replication_testsuite/testcase_4011/cluster_config.json !????? ./system_test/replication_testsuite/testcase_4011/testcase_4011_properties.json !????? ./system_test/replication_testsuite/testcase_4012/cluster_config.json !????? ./system_test/replication_testsuite/testcase_4012/testcase_4012_properties.json !????? ./system_test/replication_testsuite/testcase_4013/cluster_config.json !????? ./system_test/replication_testsuite/testcase_4013/testcase_4013_properties.json !????? ./system_test/replication_testsuite/testcase_4014/cluster_config.json !????? 
./system_test/replication_testsuite/testcase_4014/testcase_4014_properties.json !????? ./system_test/replication_testsuite/testcase_4015/cluster_config.json !????? ./system_test/replication_testsuite/testcase_4015/testcase_4015_properties.json !????? ./system_test/replication_testsuite/testcase_4016/cluster_config.json !????? ./system_test/replication_testsuite/testcase_4016/testcase_4016_properties.json !????? ./system_test/replication_testsuite/testcase_4017/cluster_config.json !????? ./system_test/replication_testsuite/testcase_4017/testcase_4017_properties.json !????? ./system_test/replication_testsuite/testcase_4018/cluster_config.json !????? ./system_test/replication_testsuite/testcase_4018/testcase_4018_properties.json !????? ./system_test/replication_testsuite/testcase_9051/cluster_config.json !????? ./system_test/replication_testsuite/testcase_9051/testcase_9051_properties.json !????? ./system_test/utils/__init__.py AL ./system_test/utils/kafka_system_test_utils.py AL ./system_test/utils/metrics.py AL ./system_test/utils/pyh.py AL ./system_test/utils/replication_utils.py AL ./system_test/utils/setup_utils.py AL ./system_test/utils/system_test_utils.py AL ./system_test/utils/testcase_env.py ***************************************************** Printing headers for files without AL header... 
======================================================================= ==./.gitignore ======================================================================= dist *classes target/ lib_managed/ src_managed/ project/boot/ project/plugins/project/ project/sbt_project_definition.iml .idea .svn .classpath *~ *# .#* rat.out TAGS ======================================================================= ==./.rat-excludes ======================================================================= .rat-excludes rat.out sbt sbt.boot.lock README* .gitignore .git .svn build.properties target src_managed update.log clients/target core/target contrib/target project/plugins/target project/build/target *.iml *.csproj TODO Makefile* *.html *.xml *expected.out *.kafka ======================================================================= ==./README.md ======================================================================= # Kafka is a distributed publish/subscribe messaging system # It is designed to support the following * Persistent messaging with O(1) disk structures that provide constant time performance even with many TB of stored messages. * High-throughput: even with very modest hardware Kafka can support hundreds of thousands of messages per second. * Explicit support for partitioning messages over Kafka servers and distributing consumption over a cluster of consumer machines while maintaining per-partition ordering semantics. * Support for parallel data load into Hadoop. Kafka is aimed at providing a publish-subscribe solution that can handle all activity stream data and processing on a consumer-scale web site. This kind of activity (page views, searches, and other user actions) are a key ingredient in many of the social feature on the modern web. This data is typically handled by "logging" and ad hoc log aggregation solutions due to the throughput requirements. 
This kind of ad hoc solution is a viable solution to providing logging data to an offline analysis system like Hadoop, but is very limiting for building real-time processing. Kafka aims to unify offline and online processing by providing a mechanism for parallel load into Hadoop as well as the ability to partition real-time consumption over a cluster of machines. See our [web site](http://kafka.apache.org/) for more details on the project. ## Contribution ## Kafka is a new project, and we are interested in building the community; we would welcome any thoughts or [patches](https://issues.apache.org/jira/browse/KAFKA). You can reach us [on the Apache mailing lists](http://kafka.apache.org/contact.html). The Kafka code is available from: * git clone http://git-wip-us.apache.org/repos/asf/kafka.git kafka To contribute you can follow: * https://cwiki.apache.org/confluence/display/KAFKA/Git+Workflow To build for all supported versions of Scala: 1. ./sbt +package To build for a particular version of Scala (either 2.8.0, 2.8.2, 2.9.1 or 2.9.2): 1. ./sbt "++2.8.0 package" *or* ./sbt "++2.8.2 package" *or* ./sbt "++2.9.1 package" *or* ./sbt "++2.9.2 package" Here are some useful sbt commands, to be executed at the sbt command prompt (./sbt). Prefixing with "++ " runs the command for a specific Scala version, prefixing with "+" will perform the action for all versions of Scala, and no prefix runs the command for the default (2.8.0) version of Scala. - tasks : Lists all the sbt commands and their descriptions clean : Deletes all generated files (the target directory). compile : Compile all the sub projects, but not create the jars test : Run all unit tests in all sub projects release-zip : Create all the jars, run unit tests and create a deployable release zip package: Creates jars for src, test, docs etc projects : List all the sub projects project sub_project_name : Switch to a particular sub-project. 
For example, to switch to the core kafka code, use "project core-kafka" ======================================================================= ==./contrib/hadoop-consumer/build.sbt ======================================================================= crossPaths := false ======================================================================= ==./contrib/hadoop-producer/README.md ======================================================================= Hadoop to Kafka Bridge ====================== What's new? ----------- * Kafka 0.8 support * No more ZK-based load balancing (backwards incompatible change) * Semantic partitioning is now supported in KafkaOutputFormat. Just specify a key in the output committer of your job. The Pig StoreFunc doesn't support semantic partitioning. * Config parameters are now the same as the Kafka producer, just prepended with kafka.output (e.g., kafka.output.max.message.size). This is a backwards incompatible change. What is it? ----------- The Hadoop to Kafka bridge is a way to publish data from Hadoop to Kafka. There are two possible mechanisms, varying from easy to difficult: writing a Pig script and writing messages in Avro format, or rolling your own job using the Kafka `OutputFormat`. Note that there are no write-once semantics: any client of the data must handle messages in an idempotent manner. That is, because of node failures and Hadoop's failure recovery, it's possible that the same message is published multiple times in the same push. How do I use it? ---------------- With this bridge, Kafka topics are URIs and are specified as URIs of the form `kafka:///` to connect to a specific Kafka broker. ### Pig ### Pig bridge writes data in binary Avro format with one message created per input row. To push data via Kafka, store to the Kafka URI using `AvroKafkaStorage` with the Avro schema as its first argument. You'll need to register the appropriate Kafka JARs. 
Here is what an example Pig script looks like: REGISTER hadoop-producer_2.8.0-0.8.0.jar; REGISTER avro-1.4.0.jar; REGISTER piggybank.jar; REGISTER kafka-0.8.0.jar; REGISTER jackson-core-asl-1.5.5.jar; REGISTER jackson-mapper-asl-1.5.5.jar; REGISTER scala-library.jar; member_info = LOAD 'member_info.tsv' AS (member_id : int, name : chararray); ======================================================================= ==./contrib/hadoop-producer/build.sbt ======================================================================= crossPaths := false ======================================================================= ==./core/build.sbt ======================================================================= import sbt._ import Keys._ import AssemblyKeys._ name := "kafka" resolvers ++= Seq( "SonaType ScalaTest repo" at "https://oss.sonatype.org/content/groups/public/org/scalatest/" ) libraryDependencies <+= scalaVersion("org.scala-lang" % "scala-compiler" % _ ) libraryDependencies ++= Seq( "org.apache.zookeeper" % "zookeeper" % "3.3.4", "com.101tec" % "zkclient" % "0.3", "org.xerial.snappy" % "snappy-java" % "1.0.4.1", "com.yammer.metrics" % "metrics-core" % "2.2.0", "com.yammer.metrics" % "metrics-annotation" % "2.2.0", "org.easymock" % "easymock" % "3.0" % "test", "junit" % "junit" % "4.1" % "test" ) libraryDependencies <<= (scalaVersion, libraryDependencies) { (sv, deps) => deps :+ (sv match { case "2.8.0" => "org.scalatest" % "scalatest" % "1.2" % "test" case _ => "org.scalatest" %% "scalatest" % "1.8" % "test" }) } assemblySettings ======================================================================= ==./core/src/main/scala/kafka/admin/CheckReassignmentStatus.scala ======================================================================= package kafka.admin import joptsimple.OptionParser import org.I0Itec.zkclient.ZkClient import kafka.utils._ import scala.collection.Map import kafka.common.TopicAndPartition object CheckReassignmentStatus extends Logging { def 
main(args: Array[String]): Unit = { val parser = new OptionParser val jsonFileOpt = parser.accepts("path-to-json-file", "REQUIRED: The JSON file with the list of partitions and the " + "new replicas they should be reassigned to") .withRequiredArg .describedAs("partition reassignment json file path") .ofType(classOf[String]) val zkConnectOpt = parser.accepts("zookeeper", "REQUIRED: The connection string for the zookeeper connection in the " + "form host:port. Multiple URLS can be given to allow fail-over.") .withRequiredArg .describedAs("urls") .ofType(classOf[String]) val options = parser.parse(args : _*) for(arg <- List(jsonFileOpt, zkConnectOpt)) { if(!options.has(arg)) { System.err.println("Missing required argument \"" + arg + "\"") parser.printHelpOn(System.err) System.exit(1) } } val jsonFile = options.valueOf(jsonFileOpt) val zkConnect = options.valueOf(zkConnectOpt) val jsonString = Utils.readFileAsString(jsonFile) val zkClient = new ZkClient(zkConnect, 30000, 30000, ZKStringSerializer) try { // read the json file into a string val partitionsToBeReassigned = Json.parseFull(jsonString) match { case Some(reassignedPartitions) => val partitions = reassignedPartitions.asInstanceOf[Array[Map[String, String]]] partitions.map { m => val topic = m.asInstanceOf[Map[String, String]].get("topic").get val partition = m.asInstanceOf[Map[String, String]].get("partition").get.toInt val replicasList = m.asInstanceOf[Map[String, String]].get("replicas").get val newReplicas = replicasList.split(",").map(_.toInt) (TopicAndPartition(topic, partition), newReplicas.toSeq) ======================================================================= ==./core/src/main/scala/kafka/api/ApiUtils.scala ======================================================================= package kafka.api import java.nio._ import kafka.common._ /** * Helper functions specific to parsing or serializing requests and responses */ object ApiUtils { val ProtocolEncoding = "UTF-8" /** * Read size prefixed 
string where the size is stored as a 2 byte short. * @param buffer The buffer to read from */ def readShortString(buffer: ByteBuffer): String = { val size: Int = buffer.getShort() if(size < 0) return null val bytes = new Array[Byte](size) buffer.get(bytes) new String(bytes, ProtocolEncoding) } /** * Write a size prefixed string where the size is stored as a 2 byte short * @param buffer The buffer to write to * @param string The string to write */ def writeShortString(buffer: ByteBuffer, string: String) { if(string == null) { buffer.putShort(-1) } else { val encodedString = string.getBytes(ProtocolEncoding) if(encodedString.length > Short.MaxValue) { throw new KafkaException("String exceeds the maximum size of " + Short.MaxValue + ".") } else { buffer.putShort(encodedString.length.asInstanceOf[Short]) buffer.put(encodedString) } } } /** * Return size of a size prefixed string where the size is stored as a 2 byte short * @param string The string to write */ def shortStringLength(string: String): Int = { if(string == null) { ======================================================================= ==./core/src/main/scala/kafka/api/UpdateMetadataRequest.scala ======================================================================= package kafka.api import java.nio.ByteBuffer import kafka.api.ApiUtils._ import kafka.cluster.Broker import kafka.common.{ErrorMapping, TopicAndPartition} import kafka.network.{BoundedByteBufferSend, RequestChannel} import kafka.network.RequestChannel.Response object UpdateMetadataRequest { val CurrentVersion = 0.shortValue val IsInit: Boolean = true val NotInit: Boolean = false val DefaultAckTimeout: Int = 1000 def readFrom(buffer: ByteBuffer): UpdateMetadataRequest = { val versionId = buffer.getShort val correlationId = buffer.getInt val clientId = readShortString(buffer) val controllerId = buffer.getInt val controllerEpoch = buffer.getInt val partitionStateInfosCount = buffer.getInt val partitionStateInfos = new 
collection.mutable.HashMap[TopicAndPartition, PartitionStateInfo] for(i <- 0 until partitionStateInfosCount){ val topic = readShortString(buffer) val partition = buffer.getInt val partitionStateInfo = PartitionStateInfo.readFrom(buffer) partitionStateInfos.put(TopicAndPartition(topic, partition), partitionStateInfo) } val numAliveBrokers = buffer.getInt val aliveBrokers = for(i <- 0 until numAliveBrokers) yield Broker.readFrom(buffer) new UpdateMetadataRequest(versionId, correlationId, clientId, controllerId, controllerEpoch, partitionStateInfos.toMap, aliveBrokers.toSet) } } case class UpdateMetadataRequest (versionId: Short, override val correlationId: Int, clientId: String, controllerId: Int, controllerEpoch: Int, partitionStateInfos: Map[TopicAndPartition, PartitionStateInfo], aliveBrokers: Set[Broker]) extends RequestOrResponse(Some(RequestKeys.UpdateMetadataKey), correlationId) { def this(controllerId: Int, controllerEpoch: Int, correlationId: Int, clientId: String, partitionStateInfos: Map[TopicAndPartition, PartitionStateInfo], aliveBrokers: Set[Broker]) = { ======================================================================= ==./core/src/main/scala/kafka/client/ClientUtils.scala ======================================================================= package kafka.client import scala.collection._ import kafka.cluster._ import kafka.api._ import kafka.producer._ import kafka.common.KafkaException import kafka.utils.{Utils, Logging} import java.util.Properties import util.Random /** * Helper functions common to clients (producer, consumer, or admin) */ object ClientUtils extends Logging{ /** * Used by the producer to send a metadata request since it has access to the ProducerConfig * @param topics The topics for which the metadata needs to be fetched * @param brokers The brokers in the cluster as configured on the producer through metadata.broker.list * @param producerConfig The producer's config * @return topic metadata response */ def 
fetchTopicMetadata(topics: Set[String], brokers: Seq[Broker], producerConfig: ProducerConfig, correlationId: Int): TopicMetadataResponse = { var fetchMetaDataSucceeded: Boolean = false var i: Int = 0 val topicMetadataRequest = new TopicMetadataRequest(TopicMetadataRequest.CurrentVersion, correlationId, producerConfig.clientId, topics.toSeq) var topicMetadataResponse: TopicMetadataResponse = null var t: Throwable = null // shuffle the list of brokers before sending metadata requests so that most requests don't get routed to the // same broker val shuffledBrokers = Random.shuffle(brokers) while(i < shuffledBrokers.size && !fetchMetaDataSucceeded) { val producer: SyncProducer = ProducerPool.createSyncProducer(producerConfig, shuffledBrokers(i)) info("Fetching metadata from broker %s with correlation id %d for %d topic(s) %s".format(shuffledBrokers(i), correlationId, topics.size, topics)) try { topicMetadataResponse = producer.send(topicMetadataRequest) fetchMetaDataSucceeded = true } catch { case e => warn("Fetching topic metadata with correlation id %d for topics [%s] from broker [%s] failed" .format(correlationId, topics, shuffledBrokers(i).toString), e) t = e } finally { i = i + 1 producer.close() } } if(!fetchMetaDataSucceeded) { ======================================================================= ==./core/src/main/scala/kafka/common/BrokerNotExistException.scala ======================================================================= ======================================================================= ==./core/src/main/scala/kafka/consumer/ConsumerTopicStat.scala ======================================================================= ======================================================================= ==./core/src/main/scala/kafka/consumer/package.html ======================================================================= This is the consumer API for kafka. 
======================================================================= ==./core/src/main/scala/kafka/log/LogSegment.scala ======================================================================= package kafka.log import scala.math._ import java.io.File import kafka.message._ import kafka.utils._ /** * A segment of the log. Each segment has two components: a log and an index. The log is a FileMessageSet containing * the actual messages. The index is an OffsetIndex that maps from logical offsets to physical file positions. Each * segment has a base offset which is an offset <= the least offset of any message in this segment and > any offset in * any previous segment. * * A segment with a base offset of [base_offset] would be stored in two files, a [base_offset].index and a [base_offset].log file. */ @nonthreadsafe class LogSegment(val messageSet: FileMessageSet, val index: OffsetIndex, val start: Long, val indexIntervalBytes: Int, time: Time) extends Range with Logging { var firstAppendTime: Option[Long] = if (messageSet.sizeInBytes > 0) Some(time.milliseconds) else None /* the number of bytes since we last added an entry in the offset index */ var bytesSinceLastIndexEntry = 0 @volatile var deleted = false def this(dir: File, startOffset: Long, indexIntervalBytes: Int, maxIndexSize: Int) = this(new FileMessageSet(file = Log.logFilename(dir, startOffset)), new OffsetIndex(file = Log.indexFilename(dir, startOffset), baseOffset = startOffset, maxIndexSize = maxIndexSize), startOffset, indexIntervalBytes, SystemTime) /* Return the size in bytes of this log segment */ def size: Long = messageSet.sizeInBytes() def updateFirstAppendTime() { if (firstAppendTime.isEmpty) firstAppendTime = Some(time.milliseconds) } /** * Append the given messages starting with the given offset. 
Add ======================================================================= ==./core/src/main/scala/kafka/log/package.html ======================================================================= The log management system for Kafka. ======================================================================= ==./core/src/main/scala/kafka/message/package.html ======================================================================= Messages and everything related to them. ======================================================================= ==./core/src/main/scala/kafka/network/package.html ======================================================================= The network server for kafka. Now application specific code here, just general network server stuff.
The classes Receive and Send encapsulate the incoming and outgoing transmission of bytes. A Handler is a mapping between a Receive and a Send, and represents the user's hook to add logic for mapping requests to actual processing code. Any uncaught exceptions in the reading or writing of the transmissions will result in the server logging an error and closing the offending socket. As a result it is the duty of the Handler implementation to catch and serialize any application-level errors that should be sent to the client.
This slightly lower-level interface that models sending and receiving rather than requests and responses is necessary in order to allow the send or receive to be overridden with a non-user-space writing of bytes using FileChannel.transferTo. ======================================================================= ==./core/src/main/scala/kafka/producer/async/AsyncProducerStats.scala ======================================================================= ======================================================================= ==./core/src/main/scala/kafka/server/package.html ======================================================================= The kafka server. ======================================================================= ==./core/src/main/scala/kafka/utils/CommandLineUtils.scala ======================================================================= package kafka.utils import joptsimple.{OptionSpec, OptionSet, OptionParser} /** * Helper functions for dealing with command line utilities */ object CommandLineUtils extends Logging { def checkRequiredArgs(parser: OptionParser, options: OptionSet, required: OptionSpec[_]*) { for(arg <- required) { if(!options.has(arg)) { error("Missing required argument \"" + arg + "\"") parser.printHelpOn(System.err) System.exit(1) } } } } ======================================================================= ==./core/src/main/scala/kafka/utils/FileLock.scala ======================================================================= package kafka.utils import java.io._ import java.nio.channels._ /** * A file lock a la flock/funlock * * The given path will be created and opened if it doesn't exist. 
*/ class FileLock(val file: File) extends Logging { file.createNewFile() private val channel = new RandomAccessFile(file, "rw").getChannel() private var flock: java.nio.channels.FileLock = null /** * Lock the file or throw an exception if the lock is already held */ def lock() { this synchronized { trace("Acquiring lock on " + file.getAbsolutePath) flock = channel.lock() } } /** * Try to lock the file and return true if the locking succeeds */ def tryLock(): Boolean = { this synchronized { trace("Acquiring lock on " + file.getAbsolutePath) try { // weirdly this method will return null if the lock is held by another // process, but will throw an exception if the lock is held by this process // so we have to handle both cases flock = channel.tryLock() flock != null } catch { case e: OverlappingFileLockException => false } } } /** * Unlock the lock if it is held */ def unlock() { this synchronized { trace("Releasing lock on " + file.getAbsolutePath) if(flock != null) ======================================================================= ==./core/src/main/scala/kafka/utils/Json.scala ======================================================================= package kafka.utils import kafka.common._ import util.parsing.json.JSON /** * A wrapper that synchronizes JSON in scala, which is not threadsafe. 
*/ object Json extends Logging { val myConversionFunc = {input : String => input.toInt} JSON.globalNumberParser = myConversionFunc val lock = new Object def parseFull(input: String): Option[Any] = { lock synchronized { try { JSON.parseFull(input) } catch { case t => throw new KafkaException("Can't parse json string: %s".format(input), t) } } } } ======================================================================= ==./core/src/main/scala/kafka/utils/Topic.scala ======================================================================= ======================================================================= ==./core/src/main/scala/kafka/utils/package.html ======================================================================= Utility functions. ======================================================================= ==./core/src/test/scala/unit/kafka/log/LogSegmentTest.scala ======================================================================= package kafka.log import junit.framework.Assert._ import java.util.concurrent.atomic._ import org.junit.{Test, After} import org.scalatest.junit.JUnit3Suite import kafka.utils.TestUtils import kafka.message._ import kafka.utils.SystemTime import scala.collection._ class LogSegmentTest extends JUnit3Suite { val segments = mutable.ArrayBuffer[LogSegment]() def createSegment(offset: Long): LogSegment = { val msFile = TestUtils.tempFile() val ms = new FileMessageSet(msFile) val idxFile = TestUtils.tempFile() idxFile.delete() val idx = new OffsetIndex(idxFile, offset, 1000) val seg = new LogSegment(ms, idx, offset, 10, SystemTime) segments += seg seg } def messages(offset: Long, messages: String*): ByteBufferMessageSet = { new ByteBufferMessageSet(compressionCodec = NoCompressionCodec, offsetCounter = new AtomicLong(offset), messages = messages.map(s => new Message(s.getBytes)):_*) } @After def teardown() { for(seg <- segments) { seg.index.delete() seg.messageSet.delete() } } @Test def testReadOnEmptySegment() { val seg = 
createSegment(40) val read = seg.read(startOffset = 40, maxSize = 300, maxOffset = None) assertEquals(0, read.size) } @Test def testReadBeforeFirstOffset() { val seg = createSegment(40) ======================================================================= ==./examples/build.sbt ======================================================================= name := "kafka-java-examples" crossPaths := false ======================================================================= ==./perf/build.sbt ======================================================================= name := "kafka-perf" ======================================================================= ==./project/plugins.sbt ======================================================================= resolvers += Resolver.url("artifactory", url("http://scalasbt.artifactoryonline.com/scalasbt/sbt-plugin-releases"))(Resolver.ivyStylePatterns) addSbtPlugin("com.eed3si9n" % "sbt-assembly" % "0.8.8") addSbtPlugin("com.github.mpeltonen" % "sbt-idea" % "1.2.0") ======================================================================= ==./system_test/__init__.py ======================================================================= ======================================================================= ==./system_test/cluster_config.json ======================================================================= { "cluster_config": [ { "entity_id": "0", "hostname": "localhost", "role": "zookeeper", "cluster_name": "source", "kafka_home": "default", "java_home": "default", "jmx_port": "9990" }, { "entity_id": "1", "hostname": "localhost", "role": "broker", "cluster_name": "source", "kafka_home": "default", "java_home": "default", "jmx_port": "9991" }, { "entity_id": "2", "hostname": "localhost", "role": "broker", "cluster_name": "source", "kafka_home": "default", "java_home": "default", "jmx_port": "9992" }, { "entity_id": "3", "hostname": "localhost", "role": "broker", "cluster_name": "source", "kafka_home": "default", "java_home": 
"default", "jmx_port": "9993" }, { "entity_id": "4", "hostname": "localhost", "role": "producer_performance", "cluster_name": "source", "kafka_home": "default", "java_home": "default", "jmx_port": "9997" }, { "entity_id": "5", "hostname": "localhost", ======================================================================= ==./system_test/logging.conf ======================================================================= # ============================================== # declaration - must have a 'root' logger # ============================================== [loggers] keys=root,namedLogger,anonymousLogger [handlers] keys=namedConsoleHandler,anonymousConsoleHandler [formatters] keys=namedFormatter,anonymousFormatter # ============================================== # loggers session # ============================================== [logger_root] level=NOTSET handlers= [logger_namedLogger] level=DEBUG handlers=namedConsoleHandler qualname=namedLogger propagate=0 [logger_anonymousLogger] level=DEBUG handlers=anonymousConsoleHandler qualname=anonymousLogger propagate=0 # ============================================== # handlers session # ** Change 'level' to INFO/DEBUG in this session # ============================================== [handler_namedConsoleHandler] class=StreamHandler level=INFO formatter=namedFormatter args=[] [handler_anonymousConsoleHandler] class=StreamHandler level=INFO formatter=anonymousFormatter args=[] # ============================================== # formatters session # ============================================== ======================================================================= ==./system_test/metrics.json ======================================================================= { "dashboards": [ { "role": "broker", "graphs": [ { "graph_name": "Produce-Request-Rate", "y_label": "requests-per-sec", "bean_name": "kafka.network:type=RequestMetrics,name=Produce-RequestsPerSec", "attributes": "OneMinuteRate" }, { "graph_name": 
"Produce-Request-Time", "y_label": "ms,ms", "bean_name": "kafka.network:type=RequestMetrics,name=Produce-TotalTimeMs", "attributes": "Mean,99thPercentile" }, { "graph_name": "Produce-Request-Remote-Time", "y_label": "ms,ms", "bean_name": "kafka.network:type=RequestMetrics,name=Produce-RemoteTimeMs", "attributes": "Mean,99thPercentile" }, { "graph_name": "Fetch-Consumer-Request-Rate", "y_label": "requests-per-sec", "bean_name": "kafka.network:type=RequestMetrics,name=Fetch-Consumer-RequestsPerSec", "attributes": "OneMinuteRate" }, { "graph_name": "Fetch-Consumer-Request-Time", "y_label": "ms,ms", "bean_name": "kafka.network:type=RequestMetrics,name=Fetch-Consumer-TotalTimeMs", "attributes": "Mean,99thPercentile" }, { "graph_name": "Fetch-Consumer-Request-Remote-Time", "y_label": "ms,ms", "bean_name": "kafka.network:type=RequestMetrics,name=Fetch-Consumer-RemoteTimeMs", "attributes": "Mean,99thPercentile" }, { "graph_name": "Fetch-Follower-Request-Rate", "y_label": "requests-per-sec", "bean_name": "kafka.network:type=RequestMetrics,name=Fetch-Follower-RequestsPerSec", "attributes": "OneMinuteRate" }, { "graph_name": "Fetch-Follower-Request-Time", "y_label": "ms,ms", ======================================================================= ==./system_test/run_sanity.sh ======================================================================= #!/bin/bash my_ts=`date +"%s"` cp testcase_to_run.json testcase_to_run.json_${my_ts} cp testcase_to_run_sanity.json testcase_to_run.json python -B system_test_runner.py ======================================================================= ==./system_test/testcase_to_run.json ======================================================================= { "ReplicaBasicTest" : [ "testcase_0001" ] } ======================================================================= ==./system_test/testcase_to_run_all.json ======================================================================= { "ReplicaBasicTest" : [ "testcase_0001", "testcase_0002", 
"testcase_0003", "testcase_0004", "testcase_0005", "testcase_0006", "testcase_0007", "testcase_0008", "testcase_0009", "testcase_0010", "testcase_0021", "testcase_0022", "testcase_0023", "testcase_0101", "testcase_0102", "testcase_0103", "testcase_0104", "testcase_0105", "testcase_0106", "testcase_0107", "testcase_0108", "testcase_0109", "testcase_0110", "testcase_0111", "testcase_0112", "testcase_0113", "testcase_0114", "testcase_0115", "testcase_0116", "testcase_0117", "testcase_0118", "testcase_0121", "testcase_0122", "testcase_0123", "testcase_0124", "testcase_0125", "testcase_0126", "testcase_0127", "testcase_0131", "testcase_0132", "testcase_0133", "testcase_0151", ======================================================================= ==./system_test/testcase_to_run_sanity.json ======================================================================= { "ReplicaBasicTest" : [ "testcase_1" ] } ======================================================================= ==./system_test/testcase_to_skip.json ======================================================================= { "ReplicaBasicTest": [ "testcase_1" ] } ======================================================================= ==./system_test/migration_tool_testsuite/__init__.py ======================================================================= ======================================================================= ==./system_test/migration_tool_testsuite/cluster_config.json ======================================================================= { "cluster_config": [ { "entity_id": "0", "hostname": "localhost", "role": "zookeeper", "cluster_name":"source", "kafka_home": "system_test/migration_tool_testsuite/0.7", "java_home": "default", "jmx_port": "9990" }, { "entity_id": "1", "hostname": "localhost", "role": "broker", "cluster_name":"source", "kafka_home": "system_test/migration_tool_testsuite/0.7", "java_home": "default", "jmx_port": "9991" }, { "entity_id": "2", "hostname": "localhost", "role": 
"broker", "cluster_name":"source", "kafka_home": "system_test/migration_tool_testsuite/0.7", "java_home": "default", "jmx_port": "9992" }, { "entity_id": "3", "hostname": "localhost", "role": "broker", "cluster_name":"source", "kafka_home": "system_test/migration_tool_testsuite/0.7", "java_home": "default", "jmx_port": "9993" }, { "entity_id": "4", "hostname": "localhost", "role": "broker", "cluster_name":"target", "kafka_home": "default", "java_home": "default", "jmx_port": "9994" }, { "entity_id": "5", "hostname": "localhost", ======================================================================= ==./system_test/migration_tool_testsuite/testcase_9001/testcase_9001_properties.json ======================================================================= { "description": {"01":"To Test : 'Replication with Migration Tool'", "02":"Set up 2 clusters such as : SOURCE => Migration Tool => TARGET", "03":"Produce and consume messages to a single topic - single partition.", "04":"This test sends messages to 3 replicas", "05":"At the end it verifies the log size and contents", "06":"Use a consumer to verify no message loss in TARGET cluster.", "07":"Producer dimensions : mode:sync, acks:-1, comp:0", "08":"Log segment size : 51200" }, "testcase_args": { "bounce_migration_tool": "false", "replica_factor": "3", "num_partition": "1", "num_iteration": "1", "sleep_seconds_between_producer_calls": "1", "message_producing_free_time_sec": "30", "num_messages_to_produce_per_producer_call": "50" }, "entities": [ { "entity_id": "0", "clientPort": "2188", "dataDir": "/tmp/zookeeper_0", "log_filename": "zookeeper_0.log", "config_filename": "zookeeper_0.properties" }, { "entity_id": "1", "port": "9091", "brokerid": "1", "version": "0.7", "log.file.size": "51200", "log.dir": "/tmp/kafka_server_1_logs", "log_filename": "kafka_server_1.log", "config_filename": "kafka_server_1.properties" }, { "entity_id": "2", "port": "9092", "brokerid": "2", "version": "0.7", "log.file.size": "51200", 
"log.dir": "/tmp/kafka_server_2_logs", "log_filename": "kafka_server_2.log", "config_filename": "kafka_server_2.properties" }, { "entity_id": "3", "port": "9093", ======================================================================= ==./system_test/migration_tool_testsuite/testcase_9003/cluster_config.json ======================================================================= { "cluster_config": [ { "entity_id": "0", "hostname": "localhost", "role": "zookeeper", "cluster_name":"source", "kafka_home": "system_test/migration_tool_testsuite/0.7", "java_home": "default", "jmx_port": "9990" }, { "entity_id": "1", "hostname": "localhost", "role": "broker", "cluster_name":"source", "kafka_home": "system_test/migration_tool_testsuite/0.7", "java_home": "default", "jmx_port": "9991" }, { "entity_id": "2", "hostname": "localhost", "role": "broker", "cluster_name":"source", "kafka_home": "system_test/migration_tool_testsuite/0.7", "java_home": "default", "jmx_port": "9992" }, { "entity_id": "3", "hostname": "localhost", "role": "broker", "cluster_name":"source", "kafka_home": "system_test/migration_tool_testsuite/0.7", "java_home": "default", "jmx_port": "9993" }, { "entity_id": "4", "hostname": "localhost", "role": "broker", "cluster_name":"target", "kafka_home": "default", "java_home": "default", "jmx_port": "9994" }, { "entity_id": "5", "hostname": "localhost", ======================================================================= ==./system_test/migration_tool_testsuite/testcase_9003/testcase_9003_properties.json ======================================================================= { "description": {"01":"To Test : 'Replication with Migration Tool'", "02":"Set up 2 clusters such as : SOURCE => Migration Tool => TARGET", "03":"Produce and consume messages to a single topic - single partition.", "04":"This test sends messages to 3 replicas", "05":"At the end it verifies the log size and contents", "06":"Use a consumer to verify no message loss in TARGET cluster.", 
"07":"Producer dimensions : mode:async, acks:-1, comp:1", "08":"Log segment size : 51200" }, "testcase_args": { "bounce_migration_tool": "true", "bounced_entity_downtime_sec": "30", "replica_factor": "3", "num_partition": "1", "num_iteration": "1", "sleep_seconds_between_producer_calls": "1", "message_producing_free_time_sec": "30", "num_messages_to_produce_per_producer_call": "50" }, "entities": [ { "entity_id": "0", "clientPort": "2188", "dataDir": "/tmp/zookeeper_0", "log_filename": "zookeeper_0.log", "config_filename": "zookeeper_0.properties" }, { "entity_id": "1", "port": "9091", "brokerid": "1", "version": "0.7", "log.file.size": "51200", "log.dir": "/tmp/kafka_server_1_logs", "log_filename": "kafka_server_1.log", "config_filename": "kafka_server_1.properties" }, { "entity_id": "2", "port": "9092", "brokerid": "2", "version": "0.7", "log.file.size": "51200", "log.dir": "/tmp/kafka_server_2_logs", "log_filename": "kafka_server_2.log", "config_filename": "kafka_server_2.properties" }, { "entity_id": "3", ======================================================================= ==./system_test/migration_tool_testsuite/testcase_9004/cluster_config.json ======================================================================= { "cluster_config": [ { "entity_id": "0", "hostname": "localhost", "role": "zookeeper", "cluster_name":"source", "kafka_home": "system_test/migration_tool_testsuite/0.7", "java_home": "default", "jmx_port": "9990" }, { "entity_id": "1", "hostname": "localhost", "role": "broker", "cluster_name":"source", "kafka_home": "system_test/migration_tool_testsuite/0.7", "java_home": "default", "jmx_port": "9991" }, { "entity_id": "2", "hostname": "localhost", "role": "broker", "cluster_name":"source", "kafka_home": "system_test/migration_tool_testsuite/0.7", "java_home": "default", "jmx_port": "9992" }, { "entity_id": "3", "hostname": "localhost", "role": "broker", "cluster_name":"source", "kafka_home": "system_test/migration_tool_testsuite/0.7", 
"java_home": "default", "jmx_port": "9993" }, { "entity_id": "4", "hostname": "localhost", "role": "broker", "cluster_name":"target", "kafka_home": "default", "java_home": "default", "jmx_port": "9994" }, { "entity_id": "5", "hostname": "localhost", ======================================================================= ==./system_test/migration_tool_testsuite/testcase_9004/testcase_9004_properties.json ======================================================================= { "description": {"01":"To Test : 'Replication with Migration Tool'", "02":"Set up 2 clusters such as : SOURCE => Migration Tool => TARGET", "03":"Produce and consume messages to a single topic - single partition.", "04":"This test sends messages to 3 replicas", "05":"At the end it verifies the log size and contents", "06":"Use a consumer to verify no message loss in TARGET cluster.", "07":"Producer dimensions : mode:async, acks:1, comp:1", "08":"Log segment size : 51200" }, "testcase_args": { "bounce_migration_tool": "true", "bounced_entity_downtime_sec": "30", "replica_factor": "3", "num_partition": "1", "num_iteration": "1", "sleep_seconds_between_producer_calls": "1", "message_producing_free_time_sec": "30", "num_messages_to_produce_per_producer_call": "50" }, "entities": [ { "entity_id": "0", "clientPort": "2188", "dataDir": "/tmp/zookeeper_0", "log_filename": "zookeeper_0.log", "config_filename": "zookeeper_0.properties" }, { "entity_id": "1", "port": "9091", "brokerid": "1", "version": "0.7", "log.file.size": "51200", "log.dir": "/tmp/kafka_server_1_logs", "log_filename": "kafka_server_1.log", "config_filename": "kafka_server_1.properties" }, { "entity_id": "2", "port": "9092", "brokerid": "2", "version": "0.7", "log.file.size": "51200", "log.dir": "/tmp/kafka_server_2_logs", "log_filename": "kafka_server_2.log", "config_filename": "kafka_server_2.properties" }, { "entity_id": "3", ======================================================================= 
==./system_test/migration_tool_testsuite/testcase_9005/cluster_config.json ======================================================================= { "cluster_config": [ { "entity_id": "0", "hostname": "localhost", "role": "zookeeper", "cluster_name":"source", "kafka_home": "system_test/migration_tool_testsuite/0.7", "java_home": "default", "jmx_port": "9900" }, { "entity_id": "1", "hostname": "localhost", "role": "zookeeper", "cluster_name":"target", "kafka_home": "default", "java_home": "default", "jmx_port": "9901" }, { "entity_id": "2", "hostname": "localhost", "role": "broker", "cluster_name":"source", "kafka_home": "system_test/migration_tool_testsuite/0.7", "java_home": "default", "jmx_port": "9902" }, { "entity_id": "3", "hostname": "localhost", "role": "broker", "cluster_name":"source", "kafka_home": "system_test/migration_tool_testsuite/0.7", "java_home": "default", "jmx_port": "9903" }, { "entity_id": "4", "hostname": "localhost", "role": "broker", "cluster_name":"source", "kafka_home": "system_test/migration_tool_testsuite/0.7", "java_home": "default", "jmx_port": "9904" }, ======================================================================= ==./system_test/migration_tool_testsuite/testcase_9005/testcase_9005_properties.json ======================================================================= { "description": {"01":"To Test : 'Replication with Migration Tool'", "02":"Set up 2 clusters such as : SOURCE => Migration Tool => TARGET", "03":"Produce and consume messages to 2 topics - 2 partitions.", "04":"This test sends messages to 3 replicas", "05":"At the end it verifies the log size and contents", "06":"Use a consumer to verify no message loss in TARGET cluster.", "07":"Producer dimensions : mode:async, acks:-1, comp:1", "08":"Log segment size : 51200" }, "testcase_args": { "bounce_migration_tool": "true", "bounced_entity_downtime_sec": "30", "replica_factor": "3", "num_partition": "2", "num_iteration": "1", "sleep_seconds_between_producer_calls": 
"1", "message_producing_free_time_sec": "30", "num_messages_to_produce_per_producer_call": "50" }, "entities": [ { "entity_id": "0", "clientPort": "2188", "dataDir": "/tmp/zookeeper_0", "log_filename": "zookeeper_0.log", "config_filename": "zookeeper_0.properties" }, { "entity_id": "1", "clientPort": "2191", "dataDir": "/tmp/zookeeper_1", "log_filename": "zookeeper_1.log", "config_filename": "zookeeper_1.properties" }, { "entity_id": "2", "port": "9091", "brokerid": "1", "version": "0.7", "log.file.size": "51200", "log.dir": "/tmp/kafka_server_2_logs", "log_filename": "kafka_server_2.log", "config_filename": "kafka_server_2.properties" }, { "entity_id": "3", "port": "9092", ======================================================================= ==./system_test/migration_tool_testsuite/testcase_9006/cluster_config.json ======================================================================= { "cluster_config": [ { "entity_id": "0", "hostname": "localhost", "role": "zookeeper", "cluster_name":"source", "kafka_home": "system_test/migration_tool_testsuite/0.7", "java_home": "default", "jmx_port": "9900" }, { "entity_id": "1", "hostname": "localhost", "role": "zookeeper", "cluster_name":"target", "kafka_home": "default", "java_home": "default", "jmx_port": "9901" }, { "entity_id": "2", "hostname": "localhost", "role": "broker", "cluster_name":"source", "kafka_home": "system_test/migration_tool_testsuite/0.7", "java_home": "default", "jmx_port": "9902" }, { "entity_id": "3", "hostname": "localhost", "role": "broker", "cluster_name":"source", "kafka_home": "system_test/migration_tool_testsuite/0.7", "java_home": "default", "jmx_port": "9903" }, { "entity_id": "4", "hostname": "localhost", "role": "broker", "cluster_name":"source", "kafka_home": "system_test/migration_tool_testsuite/0.7", "java_home": "default", "jmx_port": "9904" }, ======================================================================= 
==./system_test/migration_tool_testsuite/testcase_9006/testcase_9006_properties.json ======================================================================= { "description": {"01":"To Test : 'Replication with Migration Tool'", "02":"Set up 2 clusters such as : SOURCE => Migration Tool => TARGET", "03":"Produce and consume messages to 2 topics - 2 partitions.", "04":"This test sends messages to 3 replicas", "05":"At the end it verifies the log size and contents", "06":"Use a consumer to verify no message loss in TARGET cluster.", "07":"Producer dimensions : mode:async, acks:1, comp:1", "08":"Log segment size : 51200" }, "testcase_args": { "bounce_migration_tool": "true", "bounced_entity_downtime_sec": "30", "replica_factor": "3", "num_partition": "2", "num_iteration": "1", "sleep_seconds_between_producer_calls": "1", "message_producing_free_time_sec": "30", "num_messages_to_produce_per_producer_call": "50" }, "entities": [ { "entity_id": "0", "clientPort": "2188", "dataDir": "/tmp/zookeeper_0", "log_filename": "zookeeper_0.log", "config_filename": "zookeeper_0.properties" }, { "entity_id": "1", "clientPort": "2191", "dataDir": "/tmp/zookeeper_1", "log_filename": "zookeeper_1.log", "config_filename": "zookeeper_1.properties" }, { "entity_id": "2", "port": "9091", "brokerid": "1", "version": "0.7", "log.file.size": "51200", "log.dir": "/tmp/kafka_server_2_logs", "log_filename": "kafka_server_2.log", "config_filename": "kafka_server_2.properties" }, { "entity_id": "3", "port": "9092", ======================================================================= ==./system_test/mirror_maker/bin/expected.out ======================================================================= start the servers ... start producing messages ... wait for consumer to finish consuming ... 
[2011-05-17 14:49:11,605] INFO Creating async producer for broker id = 2 at localhost:9091 (kafka.producer.ProducerPool) [2011-05-17 14:49:11,606] INFO Creating async producer for broker id = 1 at localhost:9092 (kafka.producer.ProducerPool) [2011-05-17 14:49:11,607] INFO Creating async producer for broker id = 3 at localhost:9090 (kafka.producer.ProducerPool) thread 0: 400000 messages sent 3514012.1233 nMsg/sec 3.3453 MBs/sec [2011-05-17 14:49:34,382] INFO Closing all async producers (kafka.producer.ProducerPool) [2011-05-17 14:49:34,383] INFO Closed AsyncProducer (kafka.producer.async.AsyncProducer) [2011-05-17 14:49:34,384] INFO Closed AsyncProducer (kafka.producer.async.AsyncProducer) [2011-05-17 14:49:34,385] INFO Closed AsyncProducer (kafka.producer.async.AsyncProducer) Total Num Messages: 400000 bytes: 79859641 in 22.93 secs Messages/sec: 17444.3960 MB/sec: 3.3214 test passed stopping the servers bin/../../../bin/zookeeper-server-start.sh: line 9: 22584 Terminated $(dirname $0)/kafka-run-class.sh org.apache.zookeeper.server.quorum.QuorumPeerMain $@ bin/../../../bin/zookeeper-server-start.sh: line 9: 22585 Terminated $(dirname $0)/kafka-run-class.sh org.apache.zookeeper.server.quorum.QuorumPeerMain $@ ======================================================================= ==./system_test/mirror_maker_testsuite/__init__.py ======================================================================= ======================================================================= ==./system_test/mirror_maker_testsuite/cluster_config.json ======================================================================= { "cluster_config": [ { "entity_id": "0", "hostname": "localhost", "role": "zookeeper", "cluster_name":"source", "kafka_home": "default", "java_home": "default", "jmx_port": "9100" }, { "entity_id": "1", "hostname": "localhost", "role": "zookeeper", "cluster_name":"source", "kafka_home": "default", "java_home": "default", "jmx_port": "9101" }, { "entity_id": "2", 
"hostname": "localhost", "role": "zookeeper", "cluster_name":"target", "kafka_home": "default", "java_home": "default", "jmx_port": "9102" }, { "entity_id": "3", "hostname": "localhost", "role": "zookeeper", "cluster_name":"target", "kafka_home": "default", "java_home": "default", "jmx_port": "9103" }, { "entity_id": "4", "hostname": "localhost", "role": "broker", "cluster_name":"source", "kafka_home": "default", "java_home": "default", "jmx_port": "9104" }, { ======================================================================= ==./system_test/mirror_maker_testsuite/config/console_consumer.properties ======================================================================= ======================================================================= ==./system_test/mirror_maker_testsuite/config/consumer.properties ======================================================================= ======================================================================= ==./system_test/mirror_maker_testsuite/config/log4j.properties ======================================================================= ======================================================================= ==./system_test/mirror_maker_testsuite/config/mirror_consumer.properties ======================================================================= zookeeper.connect=localhost:2108 zookeeper.connection.timeout.ms=1000000 group.id=mm_regtest_grp auto.commit.interval.ms=120000 auto.offset.reset=smallest #fetch.message.max.bytes=1048576 #rebalance.max.retries=4 #rebalance.backoff.ms=2000 socket.receive.buffer.bytes=1048576 fetch.message.max.bytes=1048576 zookeeper.sync.time.ms=15000 shallow.iterator.enable=false ======================================================================= ==./system_test/mirror_maker_testsuite/config/mirror_producer.properties ======================================================================= producer.type=async queue.enqueue.timeout.ms=-1 metadata.broker.list=localhost:9094 
compression.codec=0 message.send.max.retries=3 request.required.acks=1 ======================================================================= ==./system_test/mirror_maker_testsuite/config/producer.properties ======================================================================= ======================================================================= ==./system_test/mirror_maker_testsuite/config/producer_performance.properties ======================================================================= ======================================================================= ==./system_test/mirror_maker_testsuite/testcase_5001/testcase_5001_properties.json ======================================================================= { "description": {"01":"To Test : 'Replication with Mirror Maker'", "02":"Set up 2 clusters such as : SOURCE => MirrorMaker => TARGET", "03":"Set up 2-node Zk cluster for both SOURCE & TARGET", "04":"Produce and consume messages to a single topic - single partition.", "05":"This test sends messages to 3 replicas", "06":"At the end it verifies the log size and contents", "07":"Use a consumer to verify no message loss in TARGET cluster.", "08":"Producer dimensions : mode:sync, acks:-1, comp:0", "09":"Log segment size : 10240" }, "testcase_args": { "bounce_leader": "false", "bounce_mirror_maker": "false", "replica_factor": "3", "num_partition": "1", "num_iteration": "1", "sleep_seconds_between_producer_calls": "1", "message_producing_free_time_sec": "15", "num_messages_to_produce_per_producer_call": "50" }, "entities": [ { "entity_id": "0", "clientPort": "2108", "dataDir": "/tmp/zookeeper_0", "log_filename": "zookeeper_0.log", "config_filename": "zookeeper_0.properties" }, { "entity_id": "1", "clientPort": "2118", "dataDir": "/tmp/zookeeper_1", "log_filename": "zookeeper_1.log", "config_filename": "zookeeper_1.properties" }, { "entity_id": "2", "clientPort": "2128", "dataDir": "/tmp/zookeeper_2", "log_filename": "zookeeper_2.log", "config_filename": 
"zookeeper_2.properties" }, { "entity_id": "3", "clientPort": "2138", "dataDir": "/tmp/zookeeper_3", "log_filename": "zookeeper_3.log", "config_filename": "zookeeper_3.properties" ======================================================================= ==./system_test/mirror_maker_testsuite/testcase_5002/testcase_5002_properties.json ======================================================================= { "description": {"01":"Replication with Mirror Maker => Bounce Mirror Maker", "02":"Set up 2 clusters such as : SOURCE => MirrorMaker => TARGET", "03":"Set up 2-node Zk cluster for both SOURCE & TARGET", "04":"Produce and consume messages to a single topic - single partition.", "05":"This test sends messages to 3 replicas", "06":"At the end it verifies the log size and contents", "07":"Use a consumer to verify no message loss in TARGET cluster.", "08":"Producer dimensions : mode:sync, acks:-1, comp:0", "09":"Log segment size : 10240" }, "testcase_args": { "bounce_leader": "false", "bounce_mirror_maker": "true", "replica_factor": "3", "num_partition": "1", "num_iteration": "1", "sleep_seconds_between_producer_calls": "1", "message_producing_free_time_sec": "15", "num_messages_to_produce_per_producer_call": "50" }, "entities": [ { "entity_id": "0", "clientPort": "2108", "dataDir": "/tmp/zookeeper_0", "log_filename": "zookeeper_0.log", "config_filename": "zookeeper_0.properties" }, { "entity_id": "1", "clientPort": "2118", "dataDir": "/tmp/zookeeper_1", "log_filename": "zookeeper_1.log", "config_filename": "zookeeper_1.properties" }, { "entity_id": "2", "clientPort": "2128", "dataDir": "/tmp/zookeeper_2", "log_filename": "zookeeper_2.log", "config_filename": "zookeeper_2.properties" }, { "entity_id": "3", "clientPort": "2138", "dataDir": "/tmp/zookeeper_3", "log_filename": "zookeeper_3.log", "config_filename": "zookeeper_3.properties" ======================================================================= 
==./system_test/mirror_maker_testsuite/testcase_5003/cluster_config.json ======================================================================= { "cluster_config": [ { "entity_id": "0", "hostname": "localhost", "role": "zookeeper", "cluster_name":"source", "kafka_home": "default", "java_home": "default", "jmx_port": "9100" }, { "entity_id": "1", "hostname": "localhost", "role": "zookeeper", "cluster_name":"source", "kafka_home": "default", "java_home": "default", "jmx_port": "9101" }, { "entity_id": "2", "hostname": "localhost", "role": "zookeeper", "cluster_name":"target", "kafka_home": "default", "java_home": "default", "jmx_port": "9102" }, { "entity_id": "3", "hostname": "localhost", "role": "zookeeper", "cluster_name":"target", "kafka_home": "default", "java_home": "default", "jmx_port": "9103" }, { "entity_id": "4", "hostname": "localhost", "role": "broker", "cluster_name":"source", "kafka_home": "default", "java_home": "default", "jmx_port": "9104" }, { ======================================================================= ==./system_test/mirror_maker_testsuite/testcase_5003/testcase_5003_properties.json ======================================================================= { "description": {"01":"Replication with Mirror Maker => Bounce Mirror Maker", "02":"Set up 2 clusters such as : SOURCE => MirrorMaker => TARGET", "03":"Set up 2-node Zk cluster for both SOURCE & TARGET", "04":"Produce and consume messages to a single topic - single partition.", "05":"This test sends messages to 3 replicas", "06":"At the end it verifies the log size and contents", "07":"Use a consumer to verify no message loss in TARGET cluster.", "08":"Producer dimensions : mode:async, acks:-1, comp:1", "09":"Log segment size : 10240" }, "testcase_args": { "bounce_leader": "false", "bounce_mirror_maker": "true", "bounced_entity_downtime_sec": "30", "replica_factor": "3", "num_partition": "1", "num_iteration": "1", "sleep_seconds_between_producer_calls": "1", 
"message_producing_free_time_sec": "15", "num_messages_to_produce_per_producer_call": "50" }, "entities": [ { "entity_id": "0", "clientPort": "2108", "dataDir": "/tmp/zookeeper_0", "log_filename": "zookeeper_0.log", "config_filename": "zookeeper_0.properties" }, { "entity_id": "1", "clientPort": "2118", "dataDir": "/tmp/zookeeper_1", "log_filename": "zookeeper_1.log", "config_filename": "zookeeper_1.properties" }, { "entity_id": "2", "clientPort": "2128", "dataDir": "/tmp/zookeeper_2", "log_filename": "zookeeper_2.log", "config_filename": "zookeeper_2.properties" }, { "entity_id": "3", "clientPort": "2138", "dataDir": "/tmp/zookeeper_3", "log_filename": "zookeeper_3.log", ======================================================================= ==./system_test/mirror_maker_testsuite/testcase_5004/cluster_config.json ======================================================================= { "cluster_config": [ { "entity_id": "0", "hostname": "localhost", "role": "zookeeper", "cluster_name":"source", "kafka_home": "default", "java_home": "default", "jmx_port": "9100" }, { "entity_id": "1", "hostname": "localhost", "role": "zookeeper", "cluster_name":"source", "kafka_home": "default", "java_home": "default", "jmx_port": "9101" }, { "entity_id": "2", "hostname": "localhost", "role": "zookeeper", "cluster_name":"target", "kafka_home": "default", "java_home": "default", "jmx_port": "9102" }, { "entity_id": "3", "hostname": "localhost", "role": "zookeeper", "cluster_name":"target", "kafka_home": "default", "java_home": "default", "jmx_port": "9103" }, { "entity_id": "4", "hostname": "localhost", "role": "broker", "cluster_name":"source", "kafka_home": "default", "java_home": "default", "jmx_port": "9104" }, { ======================================================================= ==./system_test/mirror_maker_testsuite/testcase_5004/testcase_5004_properties.json ======================================================================= { "description": {"01":"Replication with 
Mirror Maker => Bounce Mirror Maker", "02":"Set up 2 clusters such as : SOURCE => MirrorMaker => TARGET", "03":"Set up 2-node Zk cluster for both SOURCE & TARGET", "04":"Produce and consume messages to a single topic - single partition.", "05":"This test sends messages to 3 replicas", "06":"At the end it verifies the log size and contents", "07":"Use a consumer to verify no message loss in TARGET cluster.", "08":"Producer dimensions : mode:async, acks:1, comp:1", "09":"Log segment size : 10240" }, "testcase_args": { "bounce_leader": "false", "bounce_mirror_maker": "true", "bounced_entity_downtime_sec": "30", "replica_factor": "3", "num_partition": "1", "num_iteration": "1", "sleep_seconds_between_producer_calls": "1", "message_producing_free_time_sec": "15", "num_messages_to_produce_per_producer_call": "50" }, "entities": [ { "entity_id": "0", "clientPort": "2108", "dataDir": "/tmp/zookeeper_0", "log_filename": "zookeeper_0.log", "config_filename": "zookeeper_0.properties" }, { "entity_id": "1", "clientPort": "2118", "dataDir": "/tmp/zookeeper_1", "log_filename": "zookeeper_1.log", "config_filename": "zookeeper_1.properties" }, { "entity_id": "2", "clientPort": "2128", "dataDir": "/tmp/zookeeper_2", "log_filename": "zookeeper_2.log", "config_filename": "zookeeper_2.properties" }, { "entity_id": "3", "clientPort": "2138", "dataDir": "/tmp/zookeeper_3", "log_filename": "zookeeper_3.log", ======================================================================= ==./system_test/mirror_maker_testsuite/testcase_5005/cluster_config.json ======================================================================= { "cluster_config": [ { "entity_id": "0", "hostname": "localhost", "role": "zookeeper", "cluster_name":"source", "kafka_home": "default", "java_home": "default", "jmx_port": "9100" }, { "entity_id": "1", "hostname": "localhost", "role": "zookeeper", "cluster_name":"source", "kafka_home": "default", "java_home": "default", "jmx_port": "9101" }, { "entity_id": "2", 
"hostname": "localhost", "role": "zookeeper", "cluster_name":"target", "kafka_home": "default", "java_home": "default", "jmx_port": "9102" }, { "entity_id": "3", "hostname": "localhost", "role": "zookeeper", "cluster_name":"target", "kafka_home": "default", "java_home": "default", "jmx_port": "9103" }, { "entity_id": "4", "hostname": "localhost", "role": "broker", "cluster_name":"source", "kafka_home": "default", "java_home": "default", "jmx_port": "9104" }, { ======================================================================= ==./system_test/mirror_maker_testsuite/testcase_5005/testcase_5005_properties.json ======================================================================= { "description": {"01":"Replication with Mirror Maker => Bounce Mirror Maker", "02":"Set up 2 clusters such as : SOURCE => MirrorMaker => TARGET", "03":"Set up 2-node Zk cluster for both SOURCE & TARGET", "04":"Produce and consume messages to 2 topics - 2 partitions.", "05":"This test sends messages to 3 replicas", "06":"At the end it verifies the log size and contents", "07":"Use a consumer to verify no message loss in TARGET cluster.", "08":"Producer dimensions : mode:async, acks:-1, comp:1", "09":"Log segment size : 10240" }, "testcase_args": { "bounce_leader": "false", "bounce_mirror_maker": "true", "bounced_entity_downtime_sec": "30", "replica_factor": "3", "num_partition": "2", "num_iteration": "1", "sleep_seconds_between_producer_calls": "1", "message_producing_free_time_sec": "15", "num_messages_to_produce_per_producer_call": "50" }, "entities": [ { "entity_id": "0", "clientPort": "2108", "dataDir": "/tmp/zookeeper_0", "log_filename": "zookeeper_0.log", "config_filename": "zookeeper_0.properties" }, { "entity_id": "1", "clientPort": "2118", "dataDir": "/tmp/zookeeper_1", "log_filename": "zookeeper_1.log", "config_filename": "zookeeper_1.properties" }, { "entity_id": "2", "clientPort": "2128", "dataDir": "/tmp/zookeeper_2", "log_filename": "zookeeper_2.log", "config_filename": 
"zookeeper_2.properties" }, { "entity_id": "3", "clientPort": "2138", "dataDir": "/tmp/zookeeper_3", "log_filename": "zookeeper_3.log", ======================================================================= ==./system_test/mirror_maker_testsuite/testcase_5006/cluster_config.json ======================================================================= { "cluster_config": [ { "entity_id": "0", "hostname": "localhost", "role": "zookeeper", "cluster_name":"source", "kafka_home": "default", "java_home": "default", "jmx_port": "9100" }, { "entity_id": "1", "hostname": "localhost", "role": "zookeeper", "cluster_name":"source", "kafka_home": "default", "java_home": "default", "jmx_port": "9101" }, { "entity_id": "2", "hostname": "localhost", "role": "zookeeper", "cluster_name":"target", "kafka_home": "default", "java_home": "default", "jmx_port": "9102" }, { "entity_id": "3", "hostname": "localhost", "role": "zookeeper", "cluster_name":"target", "kafka_home": "default", "java_home": "default", "jmx_port": "9103" }, { "entity_id": "4", "hostname": "localhost", "role": "broker", "cluster_name":"source", "kafka_home": "default", "java_home": "default", "jmx_port": "9104" }, { ======================================================================= ==./system_test/mirror_maker_testsuite/testcase_5006/testcase_5006_properties.json ======================================================================= { "description": {"01":"Replication with Mirror Maker => Bounce Mirror Maker", "02":"Set up 2 clusters such as : SOURCE => MirrorMaker => TARGET", "03":"Set up 2-node Zk cluster for both SOURCE & TARGET", "04":"Produce and consume messages to 2 topics - 2 partitions.", "05":"This test sends messages to 3 replicas", "06":"At the end it verifies the log size and contents", "07":"Use a consumer to verify no message loss in TARGET cluster.", "08":"Producer dimensions : mode:async, acks:1, comp:1", "09":"Log segment size : 10240" }, "testcase_args": { "bounce_leader": "false", 
"bounce_mirror_maker": "true", "bounced_entity_downtime_sec": "30", "replica_factor": "3", "num_partition": "2", "num_iteration": "1", "sleep_seconds_between_producer_calls": "1", "message_producing_free_time_sec": "15", "num_messages_to_produce_per_producer_call": "50" }, "entities": [ { "entity_id": "0", "clientPort": "2108", "dataDir": "/tmp/zookeeper_0", "log_filename": "zookeeper_0.log", "config_filename": "zookeeper_0.properties" }, { "entity_id": "1", "clientPort": "2118", "dataDir": "/tmp/zookeeper_1", "log_filename": "zookeeper_1.log", "config_filename": "zookeeper_1.properties" }, { "entity_id": "2", "clientPort": "2128", "dataDir": "/tmp/zookeeper_2", "log_filename": "zookeeper_2.log", "config_filename": "zookeeper_2.properties" }, { "entity_id": "3", "clientPort": "2138", "dataDir": "/tmp/zookeeper_3", "log_filename": "zookeeper_3.log", ======================================================================= ==./system_test/producer_perf/bin/expected.out ======================================================================= start the servers ... start producing 2000000 messages ... 
[2011-05-17 14:31:12,568] INFO Creating async producer for broker id = 0 at localhost:9092 (kafka.producer.ProducerPool) thread 0: 100000 messages sent 3272786.7779 nMsg/sec 3.1212 MBs/sec thread 0: 200000 messages sent 3685956.5057 nMsg/sec 3.5152 MBs/sec thread 0: 300000 messages sent 3717472.1190 nMsg/sec 3.5453 MBs/sec thread 0: 400000 messages sent 3730647.2673 nMsg/sec 3.5578 MBs/sec thread 0: 500000 messages sent 3730647.2673 nMsg/sec 3.5578 MBs/sec thread 0: 600000 messages sent 3722315.2801 nMsg/sec 3.5499 MBs/sec thread 0: 700000 messages sent 3718854.5928 nMsg/sec 3.5466 MBs/sec thread 0: 800000 messages sent 3714020.4271 nMsg/sec 3.5420 MBs/sec thread 0: 900000 messages sent 3713330.8578 nMsg/sec 3.5413 MBs/sec thread 0: 1000000 messages sent 3710575.1391 nMsg/sec 3.5387 MBs/sec thread 0: 1100000 messages sent 3711263.6853 nMsg/sec 3.5393 MBs/sec thread 0: 1200000 messages sent 3716090.6726 nMsg/sec 3.5439 MBs/sec thread 0: 1300000 messages sent 3709198.8131 nMsg/sec 3.5374 MBs/sec thread 0: 1400000 messages sent 3705762.4606 nMsg/sec 3.5341 MBs/sec thread 0: 1500000 messages sent 3701647.2330 nMsg/sec 3.5302 MBs/sec thread 0: 1600000 messages sent 3696174.4594 nMsg/sec 3.5249 MBs/sec thread 0: 1700000 messages sent 3703703.7037 nMsg/sec 3.5321 MBs/sec thread 0: 1800000 messages sent 3703017.9596 nMsg/sec 3.5315 MBs/sec thread 0: 1900000 messages sent 3700277.5208 nMsg/sec 3.5289 MBs/sec thread 0: 2000000 messages sent 3702332.4695 nMsg/sec 3.5308 MBs/sec [2011-05-17 14:33:01,102] INFO Closing all async producers (kafka.producer.ProducerPool) [2011-05-17 14:33:01,103] INFO Closed AsyncProducer (kafka.producer.async.AsyncProducer) Total Num Messages: 2000000 bytes: 400000000 in 108.678 secs Messages/sec: 18402.9886 MB/sec: 3.5101 wait for data to be persisted test passed bin/../../../bin/kafka-server-start.sh: line 11: 21110 Terminated $(dirname $0)/kafka-run-class.sh kafka.Kafka $@ bin/../../../bin/zookeeper-server-start.sh: line 9: 21109 Terminated 
$(dirname $0)/kafka-run-class.sh org.apache.zookeeper.server.quorum.QuorumPeerMain $@ ======================================================================= ==./system_test/replication_testsuite/__init__.py ======================================================================= ======================================================================= ==./system_test/replication_testsuite/config/console_consumer.properties ======================================================================= ======================================================================= ==./system_test/replication_testsuite/config/consumer.properties ======================================================================= ======================================================================= ==./system_test/replication_testsuite/config/log4j.properties ======================================================================= ======================================================================= ==./system_test/replication_testsuite/config/producer.properties ======================================================================= ======================================================================= ==./system_test/replication_testsuite/config/producer_performance.properties ======================================================================= ======================================================================= ==./system_test/replication_testsuite/testcase_0001/testcase_0001_properties.json ======================================================================= { "description": {"01":"Replication Basic : Base Test", "02":"Produce and consume messages to a single topic - single partition.", "03":"This test sends messages to 3 replicas", "04":"At the end it verifies the log size and contents", "05":"Use a consumer to verify no message loss.", "06":"Producer dimensions : mode:sync, acks:-1, comp:0", "07":"Log segment size : 10240" }, "testcase_args": { "broker_type": "leader", 
"bounce_broker": "false", "replica_factor": "3", "num_partition": "1", "num_iteration": "1", "sleep_seconds_between_producer_calls": "1", "message_producing_free_time_sec": "15", "num_messages_to_produce_per_producer_call": "50" }, "entities": [ { "entity_id": "0", "clientPort": "2188", "dataDir": "/tmp/zookeeper_0", "log_filename": "zookeeper_2188.log", "config_filename": "zookeeper_2188.properties" }, { "entity_id": "1", "port": "9091", "broker.id": "1", "log.segment.bytes": "10240", "log.dir": "/tmp/kafka_server_1_logs", "default.replication.factor": "3", "num.partitions": "1", "log_filename": "kafka_server_9091.log", "config_filename": "kafka_server_9091.properties" }, { "entity_id": "2", "port": "9092", "broker.id": "2", "log.segment.bytes": "10240", "log.dir": "/tmp/kafka_server_2_logs", "default.replication.factor": "3", "num.partitions": "1", "log_filename": "kafka_server_9092.log", "config_filename": "kafka_server_9092.properties" }, { ======================================================================= ==./system_test/replication_testsuite/testcase_0002/testcase_0002_properties.json ======================================================================= { "description": {"01":"Replication Basic : 1. 
comp => 1", "02":"Produce and consume messages to a single topic - single partition.", "03":"This test sends messages to 3 replicas", "04":"At the end it verifies the log size and contents", "05":"Use a consumer to verify no message loss.", "06":"Producer dimensions : mode:sync, acks:-1, comp:1", "07":"Log segment size : 10240" }, "testcase_args": { "broker_type": "leader", "bounce_broker": "false", "replica_factor": "3", "num_partition": "1", "num_iteration": "1", "sleep_seconds_between_producer_calls": "1", "message_producing_free_time_sec": "15", "num_messages_to_produce_per_producer_call": "50" }, "entities": [ { "entity_id": "0", "clientPort": "2188", "dataDir": "/tmp/zookeeper_0", "log_filename": "zookeeper_2188.log", "config_filename": "zookeeper_2188.properties" }, { "entity_id": "1", "port": "9091", "broker.id": "1", "log.segment.bytes": "10240", "log.dir": "/tmp/kafka_server_1_logs", "default.replication.factor": "3", "num.partitions": "1", "log_filename": "kafka_server_9091.log", "config_filename": "kafka_server_9091.properties" }, { "entity_id": "2", "port": "9092", "broker.id": "2", "log.segment.bytes": "10240", "log.dir": "/tmp/kafka_server_2_logs", "default.replication.factor": "3", "num.partitions": "1", "log_filename": "kafka_server_9092.log", "config_filename": "kafka_server_9092.properties" }, { ======================================================================= ==./system_test/replication_testsuite/testcase_0003/testcase_0003_properties.json ======================================================================= { "description": {"01":"Replication Basic : 1. acks => 1; 2. 
comp => 1", "02":"Produce and consume messages to a single topic - single partition.", "03":"This test sends messages to 3 replicas", "04":"At the end it verifies the log size and contents", "05":"Use a consumer to verify no message loss.", "06":"Producer dimensions : mode:sync, acks:1, comp:1", "07":"Log segment size : 10240" }, "testcase_args": { "broker_type": "leader", "bounce_broker": "false", "replica_factor": "3", "num_partition": "1", "num_iteration": "1", "sleep_seconds_between_producer_calls": "1", "message_producing_free_time_sec": "15", "num_messages_to_produce_per_producer_call": "50" }, "entities": [ { "entity_id": "0", "clientPort": "2188", "dataDir": "/tmp/zookeeper_0", "log_filename": "zookeeper_2188.log", "config_filename": "zookeeper_2188.properties" }, { "entity_id": "1", "port": "9091", "broker.id": "1", "log.segment.bytes": "10240", "log.dir": "/tmp/kafka_server_1_logs", "default.replication.factor": "3", "num.partitions": "1", "log_filename": "kafka_server_9091.log", "config_filename": "kafka_server_9091.properties" }, { "entity_id": "2", "port": "9092", "broker.id": "2", "log.segment.bytes": "10240", "log.dir": "/tmp/kafka_server_2_logs", "default.replication.factor": "3", "num.partitions": "1", "log_filename": "kafka_server_9092.log", "config_filename": "kafka_server_9092.properties" }, { ======================================================================= ==./system_test/replication_testsuite/testcase_0004/testcase_0004_properties.json ======================================================================= { "description": {"01":"Replication Basic : 1. mode => async; 2. 
comp => 1", "02":"Produce and consume messages to a single topic - single partition.", "03":"This test sends messages to 3 replicas", "04":"At the end it verifies the log size and contents", "05":"Use a consumer to verify no message loss.", "06":"Producer dimensions : mode:async, acks:-1, comp:1", "07":"Log segment size : 10240" }, "testcase_args": { "broker_type": "leader", "bounce_broker": "false", "replica_factor": "3", "num_partition": "1", "num_iteration": "1", "sleep_seconds_between_producer_calls": "1", "message_producing_free_time_sec": "15", "num_messages_to_produce_per_producer_call": "50" }, "entities": [ { "entity_id": "0", "clientPort": "2188", "dataDir": "/tmp/zookeeper_0", "log_filename": "zookeeper_2188.log", "config_filename": "zookeeper_2188.properties" }, { "entity_id": "1", "port": "9091", "broker.id": "1", "log.segment.bytes": "10240", "log.dir": "/tmp/kafka_server_1_logs", "default.replication.factor": "3", "num.partitions": "1", "log_filename": "kafka_server_9091.log", "config_filename": "kafka_server_9091.properties" }, { "entity_id": "2", "port": "9092", "broker.id": "2", "log.segment.bytes": "10240", "log.dir": "/tmp/kafka_server_2_logs", "default.replication.factor": "3", "num.partitions": "1", "log_filename": "kafka_server_9092.log", "config_filename": "kafka_server_9092.properties" }, { ======================================================================= ==./system_test/replication_testsuite/testcase_0005/testcase_0005_properties.json ======================================================================= { "description": {"01":"Replication Basic : 1. mode => async; 2. acks => 1; 3. 
comp => 1", "02":"Produce and consume messages to a single topic - single partition.", "03":"This test sends messages to 3 replicas", "04":"At the end it verifies the log size and contents", "05":"Use a consumer to verify no message loss.", "06":"Producer dimensions : mode:async, acks:1, comp:1", "07":"Log segment size : 10240" }, "testcase_args": { "broker_type": "leader", "bounce_broker": "false", "replica_factor": "3", "num_partition": "1", "num_iteration": "1", "sleep_seconds_between_producer_calls": "1", "message_producing_free_time_sec": "15", "num_messages_to_produce_per_producer_call": "50" }, "entities": [ { "entity_id": "0", "clientPort": "2188", "dataDir": "/tmp/zookeeper_0", "log_filename": "zookeeper_2188.log", "config_filename": "zookeeper_2188.properties" }, { "entity_id": "1", "port": "9091", "broker.id": "1", "log.segment.bytes": "10240", "log.dir": "/tmp/kafka_server_1_logs", "default.replication.factor": "3", "num.partitions": "1", "log_filename": "kafka_server_9091.log", "config_filename": "kafka_server_9091.properties" }, { "entity_id": "2", "port": "9092", "broker.id": "2", "log.segment.bytes": "10240", "log.dir": "/tmp/kafka_server_2_logs", "default.replication.factor": "3", "num.partitions": "1", "log_filename": "kafka_server_9092.log", "config_filename": "kafka_server_9092.properties" }, { ======================================================================= ==./system_test/replication_testsuite/testcase_0006/testcase_0006_properties.json ======================================================================= { "description": {"01":"Replication Basic : 1. 
comp => 1", "02":"Produce and consume messages to a single topic - 3 partitions.", "03":"This test sends messages to 3 replicas", "04":"At the end it verifies the log size and contents", "05":"Use a consumer to verify no message loss.", "06":"Producer dimensions : mode:sync, acks:-1, comp:1", "07":"Log segment size : 10240" }, "testcase_args": { "broker_type": "leader", "bounce_broker": "false", "replica_factor": "3", "num_partition": "3", "num_iteration": "1", "sleep_seconds_between_producer_calls": "1", "message_producing_free_time_sec": "15", "num_messages_to_produce_per_producer_call": "50" }, "entities": [ { "entity_id": "0", "clientPort": "2188", "dataDir": "/tmp/zookeeper_0", "log_filename": "zookeeper_2188.log", "config_filename": "zookeeper_2188.properties" }, { "entity_id": "1", "port": "9091", "broker.id": "1", "log.segment.bytes": "10240", "log.dir": "/tmp/kafka_server_1_logs", "default.replication.factor": "3", "num.partitions": "3", "log_filename": "kafka_server_9091.log", "config_filename": "kafka_server_9091.properties" }, { "entity_id": "2", "port": "9092", "broker.id": "2", "log.segment.bytes": "10240", "log.dir": "/tmp/kafka_server_2_logs", "default.replication.factor": "3", "num.partitions": "3", "log_filename": "kafka_server_9092.log", "config_filename": "kafka_server_9092.properties" }, { ======================================================================= ==./system_test/replication_testsuite/testcase_0007/testcase_0007_properties.json ======================================================================= { "description": {"01":"Replication Basic : 1. mode => async; 2. 
comp => 1", "02":"Produce and consume messages to a single topic - 3 partitions.", "03":"This test sends messages to 3 replicas", "04":"At the end it verifies the log size and contents", "05":"Use a consumer to verify no message loss.", "06":"Producer dimensions : mode:async, acks:-1, comp:1", "07":"Log segment size : 10240" }, "testcase_args": { "broker_type": "leader", "bounce_broker": "false", "replica_factor": "3", "num_partition": "3", "num_iteration": "1", "sleep_seconds_between_producer_calls": "1", "message_producing_free_time_sec": "15", "num_messages_to_produce_per_producer_call": "50" }, "entities": [ { "entity_id": "0", "clientPort": "2188", "dataDir": "/tmp/zookeeper_0", "log_filename": "zookeeper_2188.log", "config_filename": "zookeeper_2188.properties" }, { "entity_id": "1", "port": "9091", "broker.id": "1", "log.segment.bytes": "10240", "log.dir": "/tmp/kafka_server_1_logs", "default.replication.factor": "3", "num.partitions": "3", "log_filename": "kafka_server_9091.log", "config_filename": "kafka_server_9091.properties" }, { "entity_id": "2", "port": "9092", "broker.id": "2", "log.segment.bytes": "10240", "log.dir": "/tmp/kafka_server_2_logs", "default.replication.factor": "3", "num.partitions": "3", "log_filename": "kafka_server_9092.log", "config_filename": "kafka_server_9092.properties" }, { ======================================================================= ==./system_test/replication_testsuite/testcase_0008/testcase_0008_properties.json ======================================================================= { "description": {"01":"Replication Basic : 1. acks => 1; 2. 
comp => 1", "02":"Produce and consume messages to a single topic - 3 partitions.", "03":"This test sends messages to 3 replicas", "04":"At the end it verifies the log size and contents", "05":"Use a consumer to verify no message loss.", "06":"Producer dimensions : mode:sync, acks:1, comp:1", "07":"Log segment size : 10240" }, "testcase_args": { "broker_type": "leader", "bounce_broker": "false", "replica_factor": "3", "num_partition": "3", "num_iteration": "1", "sleep_seconds_between_producer_calls": "1", "message_producing_free_time_sec": "15", "num_messages_to_produce_per_producer_call": "50" }, "entities": [ { "entity_id": "0", "clientPort": "2188", "dataDir": "/tmp/zookeeper_0", "log_filename": "zookeeper_2188.log", "config_filename": "zookeeper_2188.properties" }, { "entity_id": "1", "port": "9091", "broker.id": "1", "log.segment.bytes": "10240", "log.dir": "/tmp/kafka_server_1_logs", "default.replication.factor": "3", "num.partitions": "3", "log_filename": "kafka_server_9091.log", "config_filename": "kafka_server_9091.properties" }, { "entity_id": "2", "port": "9092", "broker.id": "2", "log.segment.bytes": "10240", "log.dir": "/tmp/kafka_server_2_logs", "default.replication.factor": "3", "num.partitions": "3", "log_filename": "kafka_server_9092.log", "config_filename": "kafka_server_9092.properties" }, { ======================================================================= ==./system_test/replication_testsuite/testcase_0009/testcase_0009_properties.json ======================================================================= { "description": {"01":"Replication Basic : 1. mode => async; 2. acks => 1; 3. 
comp => 1", "02":"Produce and consume messages to a single topic - 3 partitions.", "03":"This test sends messages to 3 replicas", "04":"At the end it verifies the log size and contents", "05":"Use a consumer to verify no message loss.", "06":"Producer dimensions : mode:async, acks:1, comp:1", "07":"Log segment size : 10240" }, "testcase_args": { "broker_type": "leader", "bounce_broker": "false", "replica_factor": "3", "num_partition": "3", "num_iteration": "1", "sleep_seconds_between_producer_calls": "1", "message_producing_free_time_sec": "15", "num_messages_to_produce_per_producer_call": "50" }, "entities": [ { "entity_id": "0", "clientPort": "2188", "dataDir": "/tmp/zookeeper_0", "log_filename": "zookeeper_2188.log", "config_filename": "zookeeper_2188.properties" }, { "entity_id": "1", "port": "9091", "broker.id": "1", "log.segment.bytes": "10240", "log.dir": "/tmp/kafka_server_1_logs", "default.replication.factor": "3", "num.partitions": "3", "log_filename": "kafka_server_9091.log", "config_filename": "kafka_server_9091.properties" }, { "entity_id": "2", "port": "9092", "broker.id": "2", "log.segment.bytes": "10240", "log.dir": "/tmp/kafka_server_2_logs", "default.replication.factor": "3", "num.partitions": "3", "log_filename": "kafka_server_9092.log", "config_filename": "kafka_server_9092.properties" }, { ======================================================================= ==./system_test/replication_testsuite/testcase_0010/testcase_0010_properties.json ======================================================================= { "description": {"01":"Replication Basic : 1. mode => async; 2. acks => 1; 3. comp => 1; 4. 
log segment size => 1M", "02":"Produce and consume messages to a single topic - 3 partitions.", "03":"This test sends messages to 3 replicas", "04":"At the end it verifies the log size and contents", "05":"Use a consumer to verify no message loss.", "06":"Producer dimensions : mode:async, acks:1, comp:1", "07":"Log segment size : 1048576 (1M)" }, "testcase_args": { "broker_type": "leader", "bounce_broker": "false", "replica_factor": "3", "num_partition": "3", "num_iteration": "1", "sleep_seconds_between_producer_calls": "1", "message_producing_free_time_sec": "15", "num_messages_to_produce_per_producer_call": "50" }, "entities": [ { "entity_id": "0", "clientPort": "2188", "dataDir": "/tmp/zookeeper_0", "log_filename": "zookeeper_2188.log", "config_filename": "zookeeper_2188.properties" }, { "entity_id": "1", "port": "9091", "broker.id": "1", "log.segment.bytes": "1048576", "log.dir": "/tmp/kafka_server_1_logs", "default.replication.factor": "3", "num.partitions": "3", "log_filename": "kafka_server_9091.log", "config_filename": "kafka_server_9091.properties" }, { "entity_id": "2", "port": "9092", "broker.id": "2", "log.segment.bytes": "1048576", "log.dir": "/tmp/kafka_server_2_logs", "default.replication.factor": "3", "num.partitions": "3", "log_filename": "kafka_server_9092.log", "config_filename": "kafka_server_9092.properties" }, { ======================================================================= ==./system_test/replication_testsuite/testcase_0011/testcase_0011_properties.json ======================================================================= { "description": {"01":"Replication Basic : 1. 
auto create topic => true", "02":"Produce and consume messages to a single topic - 3 partitions.", "03":"This test sends messages to 3 replicas", "04":"At the end it verifies the log size and contents", "05":"Use a consumer to verify no message loss.", "06":"Producer dimensions : mode:async, acks:1, comp:1", "07":"Log segment size : 1048576 (1M)" }, "testcase_args": { "broker_type": "leader", "bounce_broker": "false", "replica_factor": "3", "num_partition": "3", "num_iteration": "1", "auto_create_topic": "true", "sleep_seconds_between_producer_calls": "1", "message_producing_free_time_sec": "15", "num_messages_to_produce_per_producer_call": "50" }, "entities": [ { "entity_id": "0", "clientPort": "2188", "dataDir": "/tmp/zookeeper_0", "log_filename": "zookeeper_2188.log", "config_filename": "zookeeper_2188.properties" }, { "entity_id": "1", "port": "9091", "broker.id": "1", "num.partitions": "3", "default.replication.factor": "3", "log.segment.bytes": "1048576", "log.dir": "/tmp/kafka_server_1_logs", "log_filename": "kafka_server_9091.log", "config_filename": "kafka_server_9091.properties" }, { "entity_id": "2", "port": "9092", "broker.id": "2", "num.partitions": "3", "default.replication.factor": "3", "log.segment.bytes": "1048576", "log.dir": "/tmp/kafka_server_2_logs", "log_filename": "kafka_server_9092.log", "config_filename": "kafka_server_9092.properties" }, ======================================================================= ==./system_test/replication_testsuite/testcase_0021/cluster_config.json ======================================================================= { "cluster_config": [ { "entity_id": "0", "hostname": "localhost", "role": "zookeeper", "cluster_name": "source", "kafka_home": "default", "java_home": "default", "jmx_port": "9990" }, { "entity_id": "1", "hostname": "localhost", "role": "broker", "cluster_name": "source", "kafka_home": "default", "java_home": "default", "jmx_port": "9991" }, { "entity_id": "2", "hostname": "localhost", "role": 
"broker", "cluster_name": "source", "kafka_home": "default", "java_home": "default", "jmx_port": "9992" }, { "entity_id": "3", "hostname": "localhost", "role": "broker", "cluster_name": "source", "kafka_home": "default", "java_home": "default", "jmx_port": "9993" }, { "entity_id": "4", "hostname": "localhost", "role": "producer_performance", "cluster_name": "source", "kafka_home": "default", "java_home": "default", "jmx_port": "9997" }, { "entity_id": "5", "hostname": "localhost", ======================================================================= ==./system_test/replication_testsuite/testcase_0021/testcase_0021_properties.json ======================================================================= { "description": {"01":"Replication Basic on Multi Topics & Partitions : Base Test", "02":"Produce and consume messages to 2 topics - 3 partitions", "03":"This test sends messages to 3 replicas", "04":"At the end it verifies the log size and contents", "05":"Use a consumer to verify no message loss.", "06":"Producer dimensions : mode:sync, acks:-1, comp:0", "07":"Log segment size : 102400" }, "testcase_args": { "broker_type": "leader", "bounce_broker": "false", "replica_factor": "3", "num_partition": "3", "num_iteration": "1", "sleep_seconds_between_producer_calls": "1", "message_producing_free_time_sec": "15" }, "entities": [ { "entity_id": "0", "clientPort": "2188", "dataDir": "/tmp/zookeeper_0", "log_filename": "zookeeper_2188.log", "config_filename": "zookeeper_2188.properties" }, { "entity_id": "1", "port": "9091", "broker.id": "1", "log.segment.bytes": "102400", "log.dir": "/tmp/kafka_server_1_logs", "default.replication.factor": "3", "num.partitions": "3", "log_filename": "kafka_server_9091.log", "config_filename": "kafka_server_9091.properties" }, { "entity_id": "2", "port": "9092", "broker.id": "2", "log.segment.bytes": "102400", "log.dir": "/tmp/kafka_server_2_logs", "default.replication.factor": "3", "num.partitions": "3", "log_filename": 
"kafka_server_9092.log", "config_filename": "kafka_server_9092.properties" }, { "entity_id": "3", ======================================================================= ==./system_test/replication_testsuite/testcase_0022/cluster_config.json ======================================================================= { "cluster_config": [ { "entity_id": "0", "hostname": "localhost", "role": "zookeeper", "cluster_name": "source", "kafka_home": "default", "java_home": "default", "jmx_port": "9990" }, { "entity_id": "1", "hostname": "localhost", "role": "broker", "cluster_name": "source", "kafka_home": "default", "java_home": "default", "jmx_port": "9991" }, { "entity_id": "2", "hostname": "localhost", "role": "broker", "cluster_name": "source", "kafka_home": "default", "java_home": "default", "jmx_port": "9992" }, { "entity_id": "3", "hostname": "localhost", "role": "broker", "cluster_name": "source", "kafka_home": "default", "java_home": "default", "jmx_port": "9993" }, { "entity_id": "4", "hostname": "localhost", "role": "producer_performance", "cluster_name": "source", "kafka_home": "default", "java_home": "default", "jmx_port": "9997" }, { "entity_id": "5", "hostname": "localhost", ======================================================================= ==./system_test/replication_testsuite/testcase_0022/testcase_0022_properties.json ======================================================================= { "description": {"01":"Replication Basic on Multi Topics & Partitions : 1. acks => 1; 2. 
log segment size => 512K", "02":"Produce and consume messages to 2 topics - 3 partitions", "03":"This test sends messages to 3 replicas", "04":"At the end it verifies the log size and contents", "05":"Use a consumer to verify no message loss.", "06":"Producer dimensions : mode:sync, acks:1, comp:0", "07":"Log segment size : 512000" }, "testcase_args": { "broker_type": "leader", "bounce_broker": "false", "replica_factor": "3", "num_partition": "3", "num_iteration": "1", "sleep_seconds_between_producer_calls": "1", "message_producing_free_time_sec": "15" }, "entities": [ { "entity_id": "0", "clientPort": "2188", "dataDir": "/tmp/zookeeper_0", "log_filename": "zookeeper_2188.log", "config_filename": "zookeeper_2188.properties" }, { "entity_id": "1", "port": "9091", "broker.id": "1", "log.segment.bytes": "102400", "log.dir": "/tmp/kafka_server_1_logs", "default.replication.factor": "3", "num.partitions": "3", "log_filename": "kafka_server_9091.log", "config_filename": "kafka_server_9091.properties" }, { "entity_id": "2", "port": "9092", "broker.id": "2", "log.segment.bytes": "102400", "log.dir": "/tmp/kafka_server_2_logs", "default.replication.factor": "3", "num.partitions": "3", "log_filename": "kafka_server_9092.log", "config_filename": "kafka_server_9092.properties" }, { "entity_id": "3", ======================================================================= ==./system_test/replication_testsuite/testcase_0023/cluster_config.json ======================================================================= { "cluster_config": [ { "entity_id": "0", "hostname": "localhost", "role": "zookeeper", "cluster_name": "source", "kafka_home": "default", "java_home": "default", "jmx_port": "9990" }, { "entity_id": "1", "hostname": "localhost", "role": "broker", "cluster_name": "source", "kafka_home": "default", "java_home": "default", "jmx_port": "9991" }, { "entity_id": "2", "hostname": "localhost", "role": "broker", "cluster_name": "source", "kafka_home": "default", "java_home": 
"default", "jmx_port": "9992" }, { "entity_id": "3", "hostname": "localhost", "role": "broker", "cluster_name": "source", "kafka_home": "default", "java_home": "default", "jmx_port": "9993" }, { "entity_id": "4", "hostname": "localhost", "role": "producer_performance", "cluster_name": "source", "kafka_home": "default", "java_home": "default", "jmx_port": "9997" }, { "entity_id": "5", "hostname": "localhost", ======================================================================= ==./system_test/replication_testsuite/testcase_0023/testcase_0023_properties.json ======================================================================= { "description": {"01":"Replication Basic on Multi Topics & Partitions : 1. mode => async; 2. acks => 1; 3. comp => 1", "02":"Produce and consume messages to 2 topics - 3 partitions", "03":"This test sends messages to 3 replicas", "04":"At the end it verifies the log size and contents", "05":"Use a consumer to verify no message loss.", "06":"Producer dimensions : mode:async, acks:1, comp:1", "07":"Log segment size : 512000" }, "testcase_args": { "broker_type": "leader", "bounce_broker": "false", "replica_factor": "3", "num_partition": "3", "num_iteration": "1", "sleep_seconds_between_producer_calls": "1", "message_producing_free_time_sec": "15" }, "entities": [ { "entity_id": "0", "clientPort": "2188", "dataDir": "/tmp/zookeeper_0", "log_filename": "zookeeper_2188.log", "config_filename": "zookeeper_2188.properties" }, { "entity_id": "1", "port": "9091", "broker.id": "1", "log.segment.bytes": "512000", "log.dir": "/tmp/kafka_server_1_logs", "default.replication.factor": "3", "num.partitions": "3", "log_filename": "kafka_server_9091.log", "config_filename": "kafka_server_9091.properties" }, { "entity_id": "2", "port": "9092", "broker.id": "2", "log.segment.bytes": "512000", "log.dir": "/tmp/kafka_server_2_logs", "default.replication.factor": "3", "num.partitions": "3", "log_filename": "kafka_server_9092.log", "config_filename": 
"kafka_server_9092.properties" }, { "entity_id": "3", ======================================================================= ==./system_test/replication_testsuite/testcase_0024/testcase_0024_properties.json ======================================================================= { "description": {"01":"Replication Basic on Multi Topics & Partitions : 1. auto_create_topic => true", "02":"Produce and consume messages to 2 topics - 3 partitions", "03":"This test sends messages to 3 replicas", "04":"At the end it verifies the log size and contents", "05":"Use a consumer to verify no message loss.", "06":"Producer dimensions : mode:async, acks:1, comp:1", "07":"Log segment size : 512000" }, "testcase_args": { "broker_type": "leader", "bounce_broker": "false", "replica_factor": "3", "num_partition": "3", "num_iteration": "1", "auto_create_topic": "true", "producer_multi_topics_mode": "true", "consumer_multi_topics_mode": "true", "sleep_seconds_between_producer_calls": "1", "message_producing_free_time_sec": "15" }, "entities": [ { "entity_id": "0", "clientPort": "2188", "dataDir": "/tmp/zookeeper_0", "log_filename": "zookeeper_2188.log", "config_filename": "zookeeper_2188.properties" }, { "entity_id": "1", "port": "9091", "broker.id": "1", "num.partitions": "3", "default.replication.factor": "3", "log.segment.bytes": "512000", "log.dir": "/tmp/kafka_server_1_logs", "log_filename": "kafka_server_9091.log", "config_filename": "kafka_server_9091.properties" }, { "entity_id": "2", "port": "9092", "broker.id": "2", "num.partitions": "3", "default.replication.factor": "3", "log.segment.bytes": "512000", "log.dir": "/tmp/kafka_server_2_logs", "log_filename": "kafka_server_9092.log", "config_filename": "kafka_server_9092.properties" ======================================================================= ==./system_test/replication_testsuite/testcase_0101/testcase_0101_properties.json ======================================================================= { "description": 
{"01":"Leader Failure in Replication : Base Test", "02":"Produce and consume messages to a single topic - single partition.", "03":"This test sends messages to 3 replicas", "04":"To trigger leader election: find the leader and terminate by controlled failure (kill -15)", "05":"Restart the terminated broker", "06":"Lookup brokers' log4j messages and verify that leader is re-elected successfully", "07":"At the end it verifies the log size and contents", "08":"Use a consumer to verify no message loss.", "09":"Producer dimensions : mode:sync, acks:-1, comp:0", "10":"Log segment size : 10240" }, "testcase_args": { "broker_type": "leader", "bounce_broker": "true", "replica_factor": "3", "num_partition": "1", "num_iteration": "1", "sleep_seconds_between_producer_calls": "1", "message_producing_free_time_sec": "15", "num_messages_to_produce_per_producer_call": "50" }, "entities": [ { "entity_id": "0", "clientPort": "2188", "dataDir": "/tmp/zookeeper_0", "log_filename": "zookeeper_2188.log", "config_filename": "zookeeper_2188.properties" }, { "entity_id": "1", "port": "9091", "broker.id": "1", "log.segment.bytes": "10240", "log.dir": "/tmp/kafka_server_1_logs", "default.replication.factor": "3", "num.partitions": "1", "log_filename": "kafka_server_9091.log", "config_filename": "kafka_server_9091.properties" }, { "entity_id": "2", "port": "9092", "broker.id": "2", "log.segment.bytes": "10240", "log.dir": "/tmp/kafka_server_2_logs", "default.replication.factor": "3", "num.partitions": "1", "log_filename": "kafka_server_9092.log", ======================================================================= ==./system_test/replication_testsuite/testcase_0102/testcase_0102_properties.json ======================================================================= { "description": {"01":"Leader Failure in Replication : 1. 
comp => 1", "02":"Produce and consume messages to a single topic - single partition.", "03":"This test sends messages to 3 replicas", "04":"To trigger leader election: find the leader and terminate by controlled failure (kill -15)", "05":"Restart the terminated broker", "06":"Lookup brokers' log4j messages and verify that leader is re-elected successfully", "07":"At the end it verifies the log size and contents", "08":"Use a consumer to verify no message loss.", "09":"Producer dimensions : mode:sync, acks:-1, comp:1", "10":"Log segment size : 10240" }, "testcase_args": { "broker_type": "leader", "bounce_broker": "true", "replica_factor": "3", "num_partition": "1", "num_iteration": "1", "sleep_seconds_between_producer_calls": "1", "message_producing_free_time_sec": "15", "num_messages_to_produce_per_producer_call": "50" }, "entities": [ { "entity_id": "0", "clientPort": "2188", "dataDir": "/tmp/zookeeper_0", "log_filename": "zookeeper_2188.log", "config_filename": "zookeeper_2188.properties" }, { "entity_id": "1", "port": "9091", "broker.id": "1", "log.segment.bytes": "10240", "log.dir": "/tmp/kafka_server_1_logs", "default.replication.factor": "3", "num.partitions": "1", "log_filename": "kafka_server_9091.log", "config_filename": "kafka_server_9091.properties" }, { "entity_id": "2", "port": "9092", "broker.id": "2", "log.segment.bytes": "10240", "log.dir": "/tmp/kafka_server_2_logs", "default.replication.factor": "3", "num.partitions": "1", "log_filename": "kafka_server_9092.log", ======================================================================= ==./system_test/replication_testsuite/testcase_0103/testcase_0103_properties.json ======================================================================= { "description": {"01":"Leader Failure in Replication : 1. acks => 1; 2. 
comp => 1", "02":"Produce and consume messages to a single topic - single partition.", "03":"This test sends messages to 3 replicas", "04":"To trigger leader election: find the leader and terminate by controlled failure (kill -15)", "05":"Restart the terminated broker", "06":"Lookup brokers' log4j messages and verify that leader is re-elected successfully", "07":"At the end it verifies the log size and contents", "08":"Use a consumer to verify no message loss.", "09":"Producer dimensions : mode:sync, acks:1, comp:1", "10":"Log segment size : 10240" }, "testcase_args": { "broker_type": "leader", "bounce_broker": "true", "replica_factor": "3", "num_partition": "1", "num_iteration": "1", "sleep_seconds_between_producer_calls": "1", "message_producing_free_time_sec": "15", "num_messages_to_produce_per_producer_call": "50" }, "entities": [ { "entity_id": "0", "clientPort": "2188", "dataDir": "/tmp/zookeeper_0", "log_filename": "zookeeper_2188.log", "config_filename": "zookeeper_2188.properties" }, { "entity_id": "1", "port": "9091", "broker.id": "1", "log.segment.bytes": "10240", "log.dir": "/tmp/kafka_server_1_logs", "default.replication.factor": "3", "num.partitions": "1", "log_filename": "kafka_server_9091.log", "config_filename": "kafka_server_9091.properties" }, { "entity_id": "2", "port": "9092", "broker.id": "2", "log.segment.bytes": "10240", "log.dir": "/tmp/kafka_server_2_logs", "default.replication.factor": "3", "num.partitions": "1", "log_filename": "kafka_server_9092.log", ======================================================================= ==./system_test/replication_testsuite/testcase_0104/testcase_0104_properties.json ======================================================================= { "description": {"01":"Leader Failure in Replication : 1. mode => async; 2. 
comp => 0", "02":"Produce and consume messages to a single topic - single partition.", "03":"This test sends messages to 3 replicas", "04":"To trigger leader election: find the leader and terminate by controlled failure (kill -15)", "05":"Restart the terminated broker", "06":"Lookup brokers' log4j messages and verify that leader is re-elected successfully", "07":"At the end it verifies the log size and contents", "08":"Use a consumer to verify no message loss.", "09":"Producer dimensions : mode:async, acks:-1, comp:1", "10":"Log segment size : 10240" }, "testcase_args": { "broker_type": "leader", "bounce_broker": "true", "replica_factor": "3", "num_partition": "1", "num_iteration": "1", "sleep_seconds_between_producer_calls": "1", "message_producing_free_time_sec": "15", "num_messages_to_produce_per_producer_call": "50" }, "entities": [ { "entity_id": "0", "clientPort": "2188", "dataDir": "/tmp/zookeeper_0", "log_filename": "zookeeper_2188.log", "config_filename": "zookeeper_2188.properties" }, { "entity_id": "1", "port": "9091", "broker.id": "1", "log.segment.bytes": "10240", "log.dir": "/tmp/kafka_server_1_logs", "default.replication.factor": "3", "num.partitions": "1", "log_filename": "kafka_server_9091.log", "config_filename": "kafka_server_9091.properties" }, { "entity_id": "2", "port": "9092", "broker.id": "2", "log.segment.bytes": "10240", "log.dir": "/tmp/kafka_server_2_logs", "default.replication.factor": "3", "num.partitions": "1", "log_filename": "kafka_server_9092.log", ======================================================================= ==./system_test/replication_testsuite/testcase_0105/testcase_0105_properties.json ======================================================================= { "description": {"01":"Leader Failure in Replication : 1. mode => async; 2. acks => 1; 3. 
comp => 1", "02":"Produce and consume messages to a single topic - single partition.", "03":"This test sends messages to 3 replicas", "04":"To trigger leader election: find the leader and terminate by controlled failure (kill -15)", "05":"Restart the terminated broker", "06":"Lookup brokers' log4j messages and verify that leader is re-elected successfully", "07":"At the end it verifies the log size and contents", "08":"Use a consumer to verify no message loss.", "09":"Producer dimensions : mode:async, acks:1, comp:1", "10":"Log segment size : 10240" }, "testcase_args": { "broker_type": "leader", "bounce_broker": "true", "replica_factor": "3", "num_partition": "1", "num_iteration": "1", "sleep_seconds_between_producer_calls": "1", "message_producing_free_time_sec": "15", "num_messages_to_produce_per_producer_call": "50" }, "entities": [ { "entity_id": "0", "clientPort": "2188", "dataDir": "/tmp/zookeeper_0", "log_filename": "zookeeper_2188.log", "config_filename": "zookeeper_2188.properties" }, { "entity_id": "1", "port": "9091", "broker.id": "1", "log.segment.bytes": "10240", "log.dir": "/tmp/kafka_server_1_logs", "default.replication.factor": "3", "num.partitions": "1", "log_filename": "kafka_server_9091.log", "config_filename": "kafka_server_9091.properties" }, { "entity_id": "2", "port": "9092", "broker.id": "2", "log.segment.bytes": "10240", "log.dir": "/tmp/kafka_server_2_logs", "default.replication.factor": "3", "num.partitions": "1", "log_filename": "kafka_server_9092.log", ======================================================================= ==./system_test/replication_testsuite/testcase_0106/testcase_0106_properties.json ======================================================================= { "description": {"01":"Leader Failure in Replication : 1. comp => 1; 2. 
no of partitions => 3", "02":"Produce and consume messages to a single topic - 3 partitions.", "03":"This test sends messages to 3 replicas", "04":"To trigger leader election: find the leader and terminate by controlled failure (kill -15)", "05":"Restart the terminated broker", "06":"Lookup brokers' log4j messages and verify that leader is re-elected successfully", "07":"At the end it verifies the log size and contents", "08":"Use a consumer to verify no message loss.", "09":"Producer dimensions : mode:sync, acks:-1, comp:1", "10":"Log segment size : 10240" }, "testcase_args": { "broker_type": "leader", "bounce_broker": "true", "replica_factor": "3", "num_partition": "3", "num_iteration": "1", "sleep_seconds_between_producer_calls": "1", "message_producing_free_time_sec": "15", "num_messages_to_produce_per_producer_call": "50" }, "entities": [ { "entity_id": "0", "clientPort": "2188", "dataDir": "/tmp/zookeeper_0", "log_filename": "zookeeper_2188.log", "config_filename": "zookeeper_2188.properties" }, { "entity_id": "1", "port": "9091", "broker.id": "1", "log.segment.bytes": "10240", "log.dir": "/tmp/kafka_server_1_logs", "default.replication.factor": "3", "num.partitions": "3", "log_filename": "kafka_server_9091.log", "config_filename": "kafka_server_9091.properties" }, { "entity_id": "2", "port": "9092", "broker.id": "2", "log.segment.bytes": "10240", "log.dir": "/tmp/kafka_server_2_logs", "default.replication.factor": "3", "num.partitions": "3", "log_filename": "kafka_server_9092.log", ======================================================================= ==./system_test/replication_testsuite/testcase_0107/testcase_0107_properties.json ======================================================================= { "description": {"01":"Leader Failure in Replication : 1. mode => async; 2. comp => 1; 3. 
no of partition => 3", "02":"Produce and consume messages to a single topic - 3 partition.", "03":"This test sends messages to 3 replicas", "04":"To trigger leader election: find the leader and terminate by controlled failure (kill -15)", "05":"Restart the terminated broker", "06":"Lookup brokers' log4j messages and verify that leader is re-elected successfully", "07":"At the end it verifies the log size and contents", "08":"Use a consumer to verify no message loss.", "09":"Producer dimensions : mode:async, acks:-1, comp:1", "10":"Log segment size : 10240" }, "testcase_args": { "broker_type": "leader", "bounce_broker": "true", "replica_factor": "3", "num_partition": "3", "num_iteration": "1", "sleep_seconds_between_producer_calls": "1", "message_producing_free_time_sec": "15", "num_messages_to_produce_per_producer_call": "50" }, "entities": [ { "entity_id": "0", "clientPort": "2188", "dataDir": "/tmp/zookeeper_0", "log_filename": "zookeeper_2188.log", "config_filename": "zookeeper_2188.properties" }, { "entity_id": "1", "port": "9091", "broker.id": "1", "log.segment.bytes": "10240", "log.dir": "/tmp/kafka_server_1_logs", "default.replication.factor": "3", "num.partitions": "3", "log_filename": "kafka_server_9091.log", "config_filename": "kafka_server_9091.properties" }, { "entity_id": "2", "port": "9092", "broker.id": "2", "log.segment.bytes": "10240", "log.dir": "/tmp/kafka_server_2_logs", "default.replication.factor": "3", "num.partitions": "3", "log_filename": "kafka_server_9092.log", ======================================================================= ==./system_test/replication_testsuite/testcase_0108/testcase_0108_properties.json ======================================================================= { "description": {"01":"Leader Failure in Replication : 1. acks => 1; 2. comp => 1; 3. no. 
of partition => 3", "02":"Produce and consume messages to a single topic - 3 partition.", "03":"This test sends messages to 3 replicas", "04":"To trigger leader election: find the leader and terminate by controlled failure (kill -15)", "05":"Restart the terminated broker", "06":"Lookup brokers' log4j messages and verify that leader is re-elected successfully", "07":"At the end it verifies the log size and contents", "08":"Use a consumer to verify no message loss.", "09":"Producer dimensions : mode:sync, acks:1, comp:1", "10":"Log segment size : 10240" }, "testcase_args": { "broker_type": "leader", "bounce_broker": "true", "replica_factor": "3", "num_partition": "3", "num_iteration": "1", "sleep_seconds_between_producer_calls": "1", "message_producing_free_time_sec": "15", "num_messages_to_produce_per_producer_call": "50" }, "entities": [ { "entity_id": "0", "clientPort": "2188", "dataDir": "/tmp/zookeeper_0", "log_filename": "zookeeper_2188.log", "config_filename": "zookeeper_2188.properties" }, { "entity_id": "1", "port": "9091", "broker.id": "1", "log.segment.bytes": "10240", "log.dir": "/tmp/kafka_server_1_logs", "default.replication.factor": "3", "num.partitions": "3", "log_filename": "kafka_server_9091.log", "config_filename": "kafka_server_9091.properties" }, { "entity_id": "2", "port": "9092", "broker.id": "2", "log.segment.bytes": "10240", "log.dir": "/tmp/kafka_server_2_logs", "default.replication.factor": "3", "num.partitions": "3", "log_filename": "kafka_server_9092.log", ======================================================================= ==./system_test/replication_testsuite/testcase_0109/testcase_0109_properties.json ======================================================================= { "description": {"01":"Leader Failure in Replication : 1. mode => async; 2. acks => 1; 3. comp =>; 4. no. 
of partitions => 3", "02":"Produce and consume messages to a single topic - 3 partition.", "03":"This test sends messages to 3 replicas", "04":"To trigger leader election: find the leader and terminate by controlled failure (kill -15)", "05":"Restart the terminated broker", "06":"Lookup brokers' log4j messages and verify that leader is re-elected successfully", "07":"At the end it verifies the log size and contents", "08":"Use a consumer to verify no message loss.", "09":"Producer dimensions : mode:async, acks:1, comp:1", "10":"Log segment size : 10240" }, "testcase_args": { "broker_type": "leader", "bounce_broker": "true", "replica_factor": "3", "num_partition": "3", "num_iteration": "1", "sleep_seconds_between_producer_calls": "1", "message_producing_free_time_sec": "15", "num_messages_to_produce_per_producer_call": "50" }, "entities": [ { "entity_id": "0", "clientPort": "2188", "dataDir": "/tmp/zookeeper_0", "log_filename": "zookeeper_2188.log", "config_filename": "zookeeper_2188.properties" }, { "entity_id": "1", "port": "9091", "broker.id": "1", "log.segment.bytes": "10240", "log.dir": "/tmp/kafka_server_1_logs", "default.replication.factor": "3", "num.partitions": "3", "log_filename": "kafka_server_9091.log", "config_filename": "kafka_server_9091.properties" }, { "entity_id": "2", "port": "9092", "broker.id": "2", "log.segment.bytes": "10240", "log.dir": "/tmp/kafka_server_2_logs", "default.replication.factor": "3", "num.partitions": "3", "log_filename": "kafka_server_9092.log", ======================================================================= ==./system_test/replication_testsuite/testcase_0110/testcase_0110_properties.json ======================================================================= { "description": {"01":"Leader Failure in Replication : 1. mode => async; 2. acks => 1; 3. comp =>; 4. no. of partitins => 3; 5. 
log segment size => 1M", "02":"Produce and consume messages to a single topic - 3 partition.", "03":"This test sends messages to 3 replicas", "04":"To trigger leader election: find the leader and terminate by controlled failure (kill -15)", "05":"Restart the terminated broker", "06":"Lookup brokers' log4j messages and verify that leader is re-elected successfully", "07":"At the end it verifies the log size and contents", "08":"Use a consumer to verify no message loss.", "09":"Producer dimensions : mode:async, acks:1, comp:1", "10":"Log segment size : 1048576 (1M)" }, "testcase_args": { "broker_type": "leader", "bounce_broker": "true", "replica_factor": "3", "num_partition": "3", "num_iteration": "1", "sleep_seconds_between_producer_calls": "1", "message_producing_free_time_sec": "15", "num_messages_to_produce_per_producer_call": "50" }, "entities": [ { "entity_id": "0", "clientPort": "2188", "dataDir": "/tmp/zookeeper_0", "log_filename": "zookeeper_2188.log", "config_filename": "zookeeper_2188.properties" }, { "entity_id": "1", "port": "9091", "broker.id": "1", "log.segment.bytes": "1048576", "log.dir": "/tmp/kafka_server_1_logs", "default.replication.factor": "3", "num.partitions": "3", "log_filename": "kafka_server_9091.log", "config_filename": "kafka_server_9091.properties" }, { "entity_id": "2", "port": "9092", "broker.id": "2", "log.segment.bytes": "1048576", "log.dir": "/tmp/kafka_server_2_logs", "default.replication.factor": "3", "num.partitions": "3", "log_filename": "kafka_server_9092.log", ======================================================================= ==./system_test/replication_testsuite/testcase_0111/testcase_0111_properties.json ======================================================================= { "description": {"01":"Multi Leader Failures in Replication : Base Test", "02":"Produce and consume messages to a single topic - three partition.", "03":"This test sends messages to 3 replicas", "04":"To trigger leader election: find the leader 
and terminate by controlled failure (kill -15)", "05":"Restart the terminated broker", "06":"Lookup brokers' log4j messages and verify that leader is re-elected successfully", "07":"At the end it verifies the log size and contents", "08":"Use a consumer to verify no message loss.", "09":"Producer dimensions : mode:sync, acks:-1, comp:0", "10":"Log segment size : 102400" }, "testcase_args": { "broker_type": "leader", "auto_create_topic": "true", "bounce_broker": "true", "replica_factor": "3", "num_partition": "3", "num_iteration": "3", "sleep_seconds_between_producer_calls": "1", "message_producing_free_time_sec": "15", "num_messages_to_produce_per_producer_call": "50" }, "entities": [ { "entity_id": "0", "clientPort": "2188", "dataDir": "/tmp/zookeeper_0", "log_filename": "zookeeper_2188.log", "config_filename": "zookeeper_2188.properties" }, { "entity_id": "1", "port": "9091", "broker.id": "1", "log.segment.bytes": "102400", "log.dir": "/tmp/kafka_server_1_logs", "default.replication.factor": "3", "num.partitions": "3", "log_filename": "kafka_server_9091.log", "config_filename": "kafka_server_9091.properties" }, { "entity_id": "2", "port": "9092", "broker.id": "2", "log.segment.bytes": "102400", "log.dir": "/tmp/kafka_server_2_logs", "default.replication.factor": "3", "num.partitions": "3", ======================================================================= ==./system_test/replication_testsuite/testcase_0112/testcase_0112_properties.json ======================================================================= { "description": {"01":"Multi Leader Failures in Replication : 1. 
mode => async", "02":"Produce and consume messages to a single topic - three partition.", "03":"This test sends messages to 3 replicas", "04":"To trigger leader election: find the leader and terminate by controlled failure (kill -15)", "05":"Restart the terminated broker", "06":"Lookup brokers' log4j messages and verify that leader is re-elected successfully", "07":"At the end it verifies the log size and contents", "08":"Use a consumer to verify no message loss.", "09":"Producer dimensions : mode:async, acks:-1, comp:0", "10":"Log segment size : 102400" }, "testcase_args": { "broker_type": "leader", "auto_create_topic": "true", "bounce_broker": "true", "replica_factor": "3", "num_partition": "3", "num_iteration": "3", "sleep_seconds_between_producer_calls": "1", "message_producing_free_time_sec": "15", "num_messages_to_produce_per_producer_call": "50" }, "entities": [ { "entity_id": "0", "clientPort": "2188", "dataDir": "/tmp/zookeeper_0", "log_filename": "zookeeper_2188.log", "config_filename": "zookeeper_2188.properties" }, { "entity_id": "1", "port": "9091", "broker.id": "1", "log.segment.bytes": "102400", "log.dir": "/tmp/kafka_server_1_logs", "default.replication.factor": "3", "num.partitions": "3", "log_filename": "kafka_server_9091.log", "config_filename": "kafka_server_9091.properties" }, { "entity_id": "2", "port": "9092", "broker.id": "2", "log.segment.bytes": "102400", "log.dir": "/tmp/kafka_server_2_logs", "default.replication.factor": "3", "num.partitions": "3", ======================================================================= ==./system_test/replication_testsuite/testcase_0113/testcase_0113_properties.json ======================================================================= { "description": {"01":"Multi Leader Failures in Replication : 1. 
acks => 1", "02":"Produce and consume messages to a single topic - three partition.", "03":"This test sends messages to 3 replicas", "04":"To trigger leader election: find the leader and terminate by controlled failure (kill -15)", "05":"Restart the terminated broker", "06":"Lookup brokers' log4j messages and verify that leader is re-elected successfully", "07":"At the end it verifies the log size and contents", "08":"Use a consumer to verify no message loss.", "09":"Producer dimensions : mode:sync, acks:1, comp:0", "10":"Log segment size : 102400" }, "testcase_args": { "broker_type": "leader", "auto_create_topic": "true", "bounce_broker": "true", "replica_factor": "3", "num_partition": "3", "num_iteration": "3", "sleep_seconds_between_producer_calls": "1", "message_producing_free_time_sec": "15", "num_messages_to_produce_per_producer_call": "50" }, "entities": [ { "entity_id": "0", "clientPort": "2188", "dataDir": "/tmp/zookeeper_0", "log_filename": "zookeeper_2188.log", "config_filename": "zookeeper_2188.properties" }, { "entity_id": "1", "port": "9091", "broker.id": "1", "log.segment.bytes": "102400", "log.dir": "/tmp/kafka_server_1_logs", "default.replication.factor": "3", "num.partitions": "3", "log_filename": "kafka_server_9091.log", "config_filename": "kafka_server_9091.properties" }, { "entity_id": "2", "port": "9092", "broker.id": "2", "log.segment.bytes": "102400", "log.dir": "/tmp/kafka_server_2_logs", "default.replication.factor": "3", "num.partitions": "3", ======================================================================= ==./system_test/replication_testsuite/testcase_0114/testcase_0114_properties.json ======================================================================= { "description": {"01":"Multi Leader Failures in Replication : 1. mode => async; 2. 
acks => 1", "02":"Produce and consume messages to a single topic - three partition.", "03":"This test sends messages to 3 replicas", "04":"To trigger leader election: find the leader and terminate by controlled failure (kill -15)", "05":"Restart the terminated broker", "06":"Lookup brokers' log4j messages and verify that leader is re-elected successfully", "07":"At the end it verifies the log size and contents", "08":"Use a consumer to verify no message loss.", "09":"Producer dimensions : mode:async, acks:1, comp:0", "10":"Log segment size : 102400" }, "testcase_args": { "broker_type": "leader", "auto_create_topic": "true", "bounce_broker": "true", "replica_factor": "3", "num_partition": "3", "num_iteration": "3", "sleep_seconds_between_producer_calls": "1", "message_producing_free_time_sec": "15", "num_messages_to_produce_per_producer_call": "50" }, "entities": [ { "entity_id": "0", "clientPort": "2188", "dataDir": "/tmp/zookeeper_0", "log_filename": "zookeeper_2188.log", "config_filename": "zookeeper_2188.properties" }, { "entity_id": "1", "port": "9091", "broker.id": "1", "log.segment.bytes": "102400", "log.dir": "/tmp/kafka_server_1_logs", "default.replication.factor": "3", "num.partitions": "3", "log_filename": "kafka_server_9091.log", "config_filename": "kafka_server_9091.properties" }, { "entity_id": "2", "port": "9092", "broker.id": "2", "log.segment.bytes": "102400", "log.dir": "/tmp/kafka_server_2_logs", "default.replication.factor": "3", "num.partitions": "3", ======================================================================= ==./system_test/replication_testsuite/testcase_0115/testcase_0115_properties.json ======================================================================= { "description": {"01":"Multi Leader Failures in Replication : 1. 
comp => 1", "02":"Produce and consume messages to a single topic - three partition.", "03":"This test sends messages to 3 replicas", "04":"To trigger leader election: find the leader and terminate by controlled failure (kill -15)", "05":"Restart the terminated broker", "06":"Lookup brokers' log4j messages and verify that leader is re-elected successfully", "07":"At the end it verifies the log size and contents", "08":"Use a consumer to verify no message loss.", "09":"Producer dimensions : mode:sync, acks:-1, comp:1", "10":"Log segment size : 102400" }, "testcase_args": { "broker_type": "leader", "auto_create_topic": "true", "bounce_broker": "true", "replica_factor": "3", "num_partition": "3", "num_iteration": "3", "sleep_seconds_between_producer_calls": "1", "message_producing_free_time_sec": "15", "num_messages_to_produce_per_producer_call": "50" }, "entities": [ { "entity_id": "0", "clientPort": "2188", "dataDir": "/tmp/zookeeper_0", "log_filename": "zookeeper_2188.log", "config_filename": "zookeeper_2188.properties" }, { "entity_id": "1", "port": "9091", "broker.id": "1", "log.segment.bytes": "102400", "log.dir": "/tmp/kafka_server_1_logs", "default.replication.factor": "3", "num.partitions": "3", "log_filename": "kafka_server_9091.log", "config_filename": "kafka_server_9091.properties" }, { "entity_id": "2", "port": "9092", "broker.id": "2", "log.segment.bytes": "102400", "log.dir": "/tmp/kafka_server_2_logs", "default.replication.factor": "3", "num.partitions": "3", ======================================================================= ==./system_test/replication_testsuite/testcase_0116/testcase_0116_properties.json ======================================================================= { "description": {"01":"Multi Leader Failures in Replication : 1. mode => async; 2. 
comp => 1", "02":"Produce and consume messages to a single topic - three partition.", "03":"This test sends messages to 3 replicas", "04":"To trigger leader election: find the leader and terminate by controlled failure (kill -15)", "05":"Restart the terminated broker", "06":"Lookup brokers' log4j messages and verify that leader is re-elected successfully", "07":"At the end it verifies the log size and contents", "08":"Use a consumer to verify no message loss.", "09":"Producer dimensions : mode:async, acks:-1, comp:1", "10":"Log segment size : 102400" }, "testcase_args": { "broker_type": "leader", "auto_create_topic": "true", "bounce_broker": "true", "replica_factor": "3", "num_partition": "3", "num_iteration": "3", "sleep_seconds_between_producer_calls": "1", "message_producing_free_time_sec": "15", "num_messages_to_produce_per_producer_call": "50" }, "entities": [ { "entity_id": "0", "clientPort": "2188", "dataDir": "/tmp/zookeeper_0", "log_filename": "zookeeper_2188.log", "config_filename": "zookeeper_2188.properties" }, { "entity_id": "1", "port": "9091", "broker.id": "1", "log.segment.bytes": "102400", "log.dir": "/tmp/kafka_server_1_logs", "default.replication.factor": "3", "num.partitions": "3", "log_filename": "kafka_server_9091.log", "config_filename": "kafka_server_9091.properties" }, { "entity_id": "2", "port": "9092", "broker.id": "2", "log.segment.bytes": "102400", "log.dir": "/tmp/kafka_server_2_logs", "default.replication.factor": "3", "num.partitions": "3", ======================================================================= ==./system_test/replication_testsuite/testcase_0117/testcase_0117_properties.json ======================================================================= { "description": {"01":"Multi Leader Failures in Replication : 1. acks => 1; 2. 
comp => 1", "02":"Produce and consume messages to a single topic - three partition.", "03":"This test sends messages to 3 replicas", "04":"To trigger leader election: find the leader and terminate by controlled failure (kill -15)", "05":"Restart the terminated broker", "06":"Lookup brokers' log4j messages and verify that leader is re-elected successfully", "07":"At the end it verifies the log size and contents", "08":"Use a consumer to verify no message loss.", "09":"Producer dimensions : mode:sync, acks:1, comp:1", "10":"Log segment size : 102400" }, "testcase_args": { "broker_type": "leader", "auto_create_topic": "true", "bounce_broker": "true", "replica_factor": "3", "num_partition": "3", "num_iteration": "3", "sleep_seconds_between_producer_calls": "1", "message_producing_free_time_sec": "15", "num_messages_to_produce_per_producer_call": "50" }, "entities": [ { "entity_id": "0", "clientPort": "2188", "dataDir": "/tmp/zookeeper_0", "log_filename": "zookeeper_2188.log", "config_filename": "zookeeper_2188.properties" }, { "entity_id": "1", "port": "9091", "broker.id": "1", "log.segment.bytes": "102400", "log.dir": "/tmp/kafka_server_1_logs", "default.replication.factor": "3", "num.partitions": "3", "log_filename": "kafka_server_9091.log", "config_filename": "kafka_server_9091.properties" }, { "entity_id": "2", "port": "9092", "broker.id": "2", "log.segment.bytes": "102400", "log.dir": "/tmp/kafka_server_2_logs", "default.replication.factor": "3", "num.partitions": "3", ======================================================================= ==./system_test/replication_testsuite/testcase_0118/testcase_0118_properties.json ======================================================================= { "description": {"01":"Multi Leader Failures in Replication : 1. mode => async; 2. acks => 1; 3. 
comp => 1", "02":"Produce and consume messages to a single topic - three partition.", "03":"This test sends messages to 3 replicas", "04":"To trigger leader election: find the leader and terminate by controlled failure (kill -15)", "05":"Restart the terminated broker", "06":"Lookup brokers' log4j messages and verify that leader is re-elected successfully", "07":"At the end it verifies the log size and contents", "08":"Use a consumer to verify no message loss.", "09":"Producer dimensions : mode:async, acks:1, comp:1", "10":"Log segment size : 102400" }, "testcase_args": { "broker_type": "leader", "auto_create_topic": "true", "bounce_broker": "true", "replica_factor": "3", "num_partition": "3", "num_iteration": "3", "sleep_seconds_between_producer_calls": "1", "message_producing_free_time_sec": "15", "num_messages_to_produce_per_producer_call": "50" }, "entities": [ { "entity_id": "0", "clientPort": "2188", "dataDir": "/tmp/zookeeper_0", "log_filename": "zookeeper_2188.log", "config_filename": "zookeeper_2188.properties" }, { "entity_id": "1", "port": "9091", "broker.id": "1", "log.segment.bytes": "102400", "log.dir": "/tmp/kafka_server_1_logs", "default.replication.factor": "3", "num.partitions": "3", "log_filename": "kafka_server_9091.log", "config_filename": "kafka_server_9091.properties" }, { "entity_id": "2", "port": "9092", "broker.id": "2", "log.segment.bytes": "102400", "log.dir": "/tmp/kafka_server_2_logs", "default.replication.factor": "3", "num.partitions": "3", ======================================================================= ==./system_test/replication_testsuite/testcase_0119/testcase_0119_properties.json ======================================================================= { "description": {"01":"Multi Leader Failures in Replication : 1. 
auto_create_topic => true", "02":"Produce and consume messages to a single topic - three partition.", "03":"This test sends messages to 3 replicas", "04":"To trigger leader election: find the leader and terminate by controlled failure (kill -15)", "05":"Restart the terminated broker", "06":"Lookup brokers' log4j messages and verify that leader is re-elected successfully", "07":"At the end it verifies the log size and contents", "08":"Use a consumer to verify no message loss.", "09":"Producer dimensions : mode:sync, acks:-1, comp:0", "10":"Log segment size : 102400" }, "testcase_args": { "broker_type": "leader", "bounce_broker": "true", "replica_factor": "3", "num_partition": "3", "num_iteration": "3", "auto_create_topic": "true", "sleep_seconds_between_producer_calls": "1", "message_producing_free_time_sec": "15", "num_messages_to_produce_per_producer_call": "50" }, "entities": [ { "entity_id": "0", "clientPort": "2188", "dataDir": "/tmp/zookeeper_0", "log_filename": "zookeeper_2188.log", "config_filename": "zookeeper_2188.properties" }, { "entity_id": "1", "port": "9091", "broker.id": "1", "num.partitions": "3", "default.replication.factor": "3", "log.segment.bytes": "102400", "log.dir": "/tmp/kafka_server_1_logs", "log_filename": "kafka_server_9091.log", "config_filename": "kafka_server_9091.properties" }, { "entity_id": "2", "port": "9092", "broker.id": "2", "num.partitions": "3", "default.replication.factor": "3", "log.segment.bytes": "102400", "log.dir": "/tmp/kafka_server_2_logs", ======================================================================= ==./system_test/replication_testsuite/testcase_0121/cluster_config.json ======================================================================= { "cluster_config": [ { "entity_id": "0", "hostname": "localhost", "role": "zookeeper", "cluster_name": "source", "kafka_home": "default", "java_home": "default", "jmx_port": "9990" }, { "entity_id": "1", "hostname": "localhost", "role": "broker", "cluster_name": 
"source", "kafka_home": "default", "java_home": "default", "jmx_port": "9991" }, { "entity_id": "2", "hostname": "localhost", "role": "broker", "cluster_name": "source", "kafka_home": "default", "java_home": "default", "jmx_port": "9992" }, { "entity_id": "3", "hostname": "localhost", "role": "broker", "cluster_name": "source", "kafka_home": "default", "java_home": "default", "jmx_port": "9993" }, { "entity_id": "4", "hostname": "localhost", "role": "producer_performance", "cluster_name": "source", "kafka_home": "default", "java_home": "default", "jmx_port": "9997" }, { "entity_id": "5", "hostname": "localhost", ======================================================================= ==./system_test/replication_testsuite/testcase_0121/testcase_0121_properties.json ======================================================================= { "description": {"01":"Leader Failure in Replication with multi topics & partitions : Base Test", "02":"Produce and consume messages to 2 topics - 3 partitions", "03":"This test sends messages to 3 replicas", "04":"To trigger leader election: find the leader and terminate by controlled failure (kill -15)", "05":"Restart the terminated broker", "06":"Lookup brokers' log4j messages and verify that leader is re-elected successfully", "07":"At the end it verifies the log size and contents", "08":"Use a consumer to verify no message loss.", "09":"Producer dimensions : mode:sync, acks:-1, comp:0", "10":"Log segment size : 102400" }, "testcase_args": { "broker_type": "leader", "auto_create_topic": "true", "bounce_broker": "true", "replica_factor": "3", "num_partition": "3", "num_iteration": "3", "sleep_seconds_between_producer_calls": "1", "message_producing_free_time_sec": "15" }, "entities": [ { "entity_id": "0", "clientPort": "2188", "dataDir": "/tmp/zookeeper_0", "log_filename": "zookeeper_2188.log", "config_filename": "zookeeper_2188.properties" }, { "entity_id": "1", "port": "9091", "broker.id": "1", "log.segment.bytes": "102400", 
"log.dir": "/tmp/kafka_server_1_logs", "default.replication.factor": "3", "num.partitions": "3", "log_filename": "kafka_server_9091.log", "config_filename": "kafka_server_9091.properties" }, { "entity_id": "2", "port": "9092", "broker.id": "2", "log.segment.bytes": "102400", "log.dir": "/tmp/kafka_server_2_logs", "default.replication.factor": "3", "num.partitions": "3", "log_filename": "kafka_server_9092.log", ======================================================================= ==./system_test/replication_testsuite/testcase_0122/cluster_config.json ======================================================================= { "cluster_config": [ { "entity_id": "0", "hostname": "localhost", "role": "zookeeper", "cluster_name": "source", "kafka_home": "default", "java_home": "default", "jmx_port": "9990" }, { "entity_id": "1", "hostname": "localhost", "role": "broker", "cluster_name": "source", "kafka_home": "default", "java_home": "default", "jmx_port": "9991" }, { "entity_id": "2", "hostname": "localhost", "role": "broker", "cluster_name": "source", "kafka_home": "default", "java_home": "default", "jmx_port": "9992" }, { "entity_id": "3", "hostname": "localhost", "role": "broker", "cluster_name": "source", "kafka_home": "default", "java_home": "default", "jmx_port": "9993" }, { "entity_id": "4", "hostname": "localhost", "role": "producer_performance", "cluster_name": "source", "kafka_home": "default", "java_home": "default", "jmx_port": "9997" }, { "entity_id": "5", "hostname": "localhost", ======================================================================= ==./system_test/replication_testsuite/testcase_0122/testcase_0122_properties.json ======================================================================= { "description": {"01":"Leader Failure in Replication with multi topics & partitions : 1. 
acks => 1", "02":"Produce and consume messages to 2 topics - 3 partitions", "03":"This test sends messages to 3 replicas", "04":"To trigger leader election: find the leader and terminate by controlled failure (kill -15)", "05":"Restart the terminated broker", "06":"Lookup brokers' log4j messages and verify that leader is re-elected successfully", "07":"At the end it verifies the log size and contents", "08":"Use a consumer to verify no message loss.", "09":"Producer dimensions : mode:sync, acks:1, comp:0", "10":"Log segment size : 512000" }, "testcase_args": { "broker_type": "leader", "auto_create_topic": "true", "bounce_broker": "true", "replica_factor": "3", "num_partition": "3", "num_iteration": "3", "sleep_seconds_between_producer_calls": "1", "message_producing_free_time_sec": "15" }, "entities": [ { "entity_id": "0", "clientPort": "2188", "dataDir": "/tmp/zookeeper_0", "log_filename": "zookeeper_2188.log", "config_filename": "zookeeper_2188.properties" }, { "entity_id": "1", "port": "9091", "broker.id": "1", "log.segment.bytes": "512000", "log.dir": "/tmp/kafka_server_1_logs", "default.replication.factor": "3", "num.partitions": "3", "log_filename": "kafka_server_9091.log", "config_filename": "kafka_server_9091.properties" }, { "entity_id": "2", "port": "9092", "broker.id": "2", "log.segment.bytes": "512000", "log.dir": "/tmp/kafka_server_2_logs", "default.replication.factor": "3", "num.partitions": "3", "log_filename": "kafka_server_9092.log", ======================================================================= ==./system_test/replication_testsuite/testcase_0123/cluster_config.json ======================================================================= { "cluster_config": [ { "entity_id": "0", "hostname": "localhost", "role": "zookeeper", "cluster_name": "source", "kafka_home": "default", "java_home": "default", "jmx_port": "9990" }, { "entity_id": "1", "hostname": "localhost", "role": "broker", "cluster_name": "source", "kafka_home": "default", 
"java_home": "default", "jmx_port": "9991" }, { "entity_id": "2", "hostname": "localhost", "role": "broker", "cluster_name": "source", "kafka_home": "default", "java_home": "default", "jmx_port": "9992" }, { "entity_id": "3", "hostname": "localhost", "role": "broker", "cluster_name": "source", "kafka_home": "default", "java_home": "default", "jmx_port": "9993" }, { "entity_id": "4", "hostname": "localhost", "role": "producer_performance", "cluster_name": "source", "kafka_home": "default", "java_home": "default", "jmx_port": "9997" }, { "entity_id": "5", "hostname": "localhost", ======================================================================= ==./system_test/replication_testsuite/testcase_0123/testcase_0123_properties.json ======================================================================= { "description": {"01":"Leader Failure in Replication with multi topics & partitions : 1. mode => async; 2. comp => 0", "02":"Produce and consume messages to 2 topics - 3 partitions", "03":"This test sends messages to 3 replicas", "04":"To trigger leader election: find the leader and terminate by controlled failure (kill -15)", "05":"Restart the terminated broker", "06":"Lookup brokers' log4j messages and verify that leader is re-elected successfully", "07":"At the end it verifies the log size and contents", "08":"Use a consumer to verify no message loss.", "09":"Producer dimensions : mode:async, acks:1, comp:1", "10":"Log segment size : 512000" }, "testcase_args": { "broker_type": "leader", "auto_create_topic": "true", "bounce_broker": "true", "replica_factor": "3", "num_partition": "3", "num_iteration": "3", "sleep_seconds_between_producer_calls": "1", "message_producing_free_time_sec": "15" }, "entities": [ { "entity_id": "0", "clientPort": "2188", "dataDir": "/tmp/zookeeper_0", "log_filename": "zookeeper_2188.log", "config_filename": "zookeeper_2188.properties" }, { "entity_id": "1", "port": "9091", "broker.id": "1", "log.segment.bytes": "512000", "log.dir": 
"/tmp/kafka_server_1_logs", "default.replication.factor": "3", "num.partitions": "3", "log_filename": "kafka_server_9091.log", "config_filename": "kafka_server_9091.properties" }, { "entity_id": "2", "port": "9092", "broker.id": "2", "log.segment.bytes": "512000", "log.dir": "/tmp/kafka_server_2_logs", "default.replication.factor": "3", "num.partitions": "3", "log_filename": "kafka_server_9092.log", ======================================================================= ==./system_test/replication_testsuite/testcase_0124/cluster_config.json ======================================================================= { "cluster_config": [ { "entity_id": "0", "hostname": "localhost", "role": "zookeeper", "cluster_name": "source", "kafka_home": "default", "java_home": "default", "jmx_port": "9990" }, { "entity_id": "1", "hostname": "localhost", "role": "broker", "cluster_name": "source", "kafka_home": "default", "java_home": "default", "jmx_port": "9991" }, { "entity_id": "2", "hostname": "localhost", "role": "broker", "cluster_name": "source", "kafka_home": "default", "java_home": "default", "jmx_port": "9992" }, { "entity_id": "3", "hostname": "localhost", "role": "broker", "cluster_name": "source", "kafka_home": "default", "java_home": "default", "jmx_port": "9993" }, { "entity_id": "4", "hostname": "localhost", "role": "producer_performance", "cluster_name": "source", "kafka_home": "default", "java_home": "default", "jmx_port": "9997" }, { "entity_id": "5", "hostname": "localhost", ======================================================================= ==./system_test/replication_testsuite/testcase_0124/testcase_0124_properties.json ======================================================================= { "description": {"01":"Leader Failure in Replication with multi topics & partitions : 1. 
log.index.interval.bytes => 490", "02":"Setting log.index.interval.bytes to slightly smaller than message size to force indexing on each message", "03":"Produce and consume messages to 2 topics - 3 partitions", "04":"This test sends messages to 3 replicas", "05":"To trigger leader election: find the leader and terminate by controlled failure (kill -15)", "06":"Restart the terminated broker", "07":"Lookup brokers' log4j messages and verify that leader is re-elected successfully", "08":"At the end it verifies the log size and contents", "09":"Use a consumer to verify no message loss.", "10":"Producer dimensions : mode:sync, acks:-1, comp:0", "11":"Log segment size : 102400" }, "testcase_args": { "broker_type": "leader", "auto_create_topic": "true", "bounce_broker": "true", "replica_factor": "3", "num_partition": "3", "num_iteration": "3", "sleep_seconds_between_producer_calls": "1", "message_producing_free_time_sec": "15" }, "entities": [ { "entity_id": "0", "clientPort": "2188", "dataDir": "/tmp/zookeeper_0", "log_filename": "zookeeper_2188.log", "config_filename": "zookeeper_2188.properties" }, { "entity_id": "1", "port": "9091", "broker.id": "1", "log.segment.bytes": "102400", "log.dir": "/tmp/kafka_server_1_logs", "default.replication.factor": "3", "num.partitions": "3", "log.index.interval.bytes": "490", "log_filename": "kafka_server_9091.log", "config_filename": "kafka_server_9091.properties" }, { "entity_id": "2", "port": "9092", "broker.id": "2", "log.segment.bytes": "102400", "log.dir": "/tmp/kafka_server_2_logs", "default.replication.factor": "3", ======================================================================= ==./system_test/replication_testsuite/testcase_0125/cluster_config.json ======================================================================= { "cluster_config": [ { "entity_id": "0", "hostname": "localhost", "role": "zookeeper", "cluster_name": "source", "kafka_home": "default", "java_home": "default", "jmx_port": "9990" }, { "entity_id": 
"1", "hostname": "localhost", "role": "broker", "cluster_name": "source", "kafka_home": "default", "java_home": "default", "jmx_port": "9991" }, { "entity_id": "2", "hostname": "localhost", "role": "broker", "cluster_name": "source", "kafka_home": "default", "java_home": "default", "jmx_port": "9992" }, { "entity_id": "3", "hostname": "localhost", "role": "broker", "cluster_name": "source", "kafka_home": "default", "java_home": "default", "jmx_port": "9993" }, { "entity_id": "4", "hostname": "localhost", "role": "producer_performance", "cluster_name": "source", "kafka_home": "default", "java_home": "default", "jmx_port": "9997" }, { "entity_id": "5", "hostname": "localhost", ======================================================================= ==./system_test/replication_testsuite/testcase_0125/testcase_0125_properties.json ======================================================================= { "description": {"01":"Leader Failure in Replication with multi topics & partitions : 1. ack => 1", "02":"Setting log.index.interval.bytes to slightly smaller than message size to force indexing on each message", "03":"Produce and consume messages to 2 topics - 3 partitions", "04":"This test sends messages to 3 replicas", "05":"To trigger leader election: find the leader and terminate by controlled failure (kill -15)", "06":"Restart the terminated broker", "07":"Lookup brokers' log4j messages and verify that leader is re-elected successfully", "08":"At the end it verifies the log size and contents", "09":"Use a consumer to verify no message loss.", "10":"Producer dimensions : mode:sync, acks:1, comp:0", "11":"Log segment size : 102400" }, "testcase_args": { "broker_type": "leader", "auto_create_topic": "true", "bounce_broker": "true", "replica_factor": "3", "num_partition": "3", "num_iteration": "3", "sleep_seconds_between_producer_calls": "1", "message_producing_free_time_sec": "15" }, "entities": [ { "entity_id": "0", "clientPort": "2188", "dataDir": "/tmp/zookeeper_0", 
"log_filename": "zookeeper_2188.log", "config_filename": "zookeeper_2188.properties" }, { "entity_id": "1", "port": "9091", "broker.id": "1", "log.segment.bytes": "102400", "log.dir": "/tmp/kafka_server_1_logs", "default.replication.factor": "3", "num.partitions": "3", "log.index.interval.bytes": "490", "log_filename": "kafka_server_9091.log", "config_filename": "kafka_server_9091.properties" }, { "entity_id": "2", "port": "9092", "broker.id": "2", "log.segment.bytes": "102400", "log.dir": "/tmp/kafka_server_2_logs", "default.replication.factor": "3", ======================================================================= ==./system_test/replication_testsuite/testcase_0126/cluster_config.json ======================================================================= { "cluster_config": [ { "entity_id": "0", "hostname": "localhost", "role": "zookeeper", "cluster_name": "source", "kafka_home": "default", "java_home": "default", "jmx_port": "9990" }, { "entity_id": "1", "hostname": "localhost", "role": "broker", "cluster_name": "source", "kafka_home": "default", "java_home": "default", "jmx_port": "9991" }, { "entity_id": "2", "hostname": "localhost", "role": "broker", "cluster_name": "source", "kafka_home": "default", "java_home": "default", "jmx_port": "9992" }, { "entity_id": "3", "hostname": "localhost", "role": "broker", "cluster_name": "source", "kafka_home": "default", "java_home": "default", "jmx_port": "9993" }, { "entity_id": "4", "hostname": "localhost", "role": "producer_performance", "cluster_name": "source", "kafka_home": "default", "java_home": "default", "jmx_port": "9997" }, { "entity_id": "5", "hostname": "localhost", ======================================================================= ==./system_test/replication_testsuite/testcase_0126/testcase_0126_properties.json ======================================================================= { "description": {"01":"Leader Failure in Replication with multi topics & partitions : 1. ack => -1, 2. 
comp => 1", "02":"Setting log.index.interval.bytes to slightly smaller than message size to force indexing on each message", "03":"Produce and consume messages to 2 topics - 3 partitions", "04":"This test sends messages to 3 replicas", "05":"To trigger leader election: find the leader and terminate by controlled failure (kill -15)", "06":"Restart the terminated broker", "07":"Lookup brokers' log4j messages and verify that leader is re-elected successfully", "08":"At the end it verifies the log size and contents", "09":"Use a consumer to verify no message loss.", "10":"Producer dimensions : mode:sync, acks:-1, comp:1", "11":"Log segment size : 102400" }, "testcase_args": { "broker_type": "leader", "auto_create_topic": "true", "bounce_broker": "true", "replica_factor": "3", "num_partition": "3", "num_iteration": "3", "sleep_seconds_between_producer_calls": "1", "message_producing_free_time_sec": "15" }, "entities": [ { "entity_id": "0", "clientPort": "2188", "dataDir": "/tmp/zookeeper_0", "log_filename": "zookeeper_2188.log", "config_filename": "zookeeper_2188.properties" }, { "entity_id": "1", "port": "9091", "broker.id": "1", "log.segment.bytes": "102400", "log.dir": "/tmp/kafka_server_1_logs", "default.replication.factor": "3", "num.partitions": "3", "log.index.interval.bytes": "490", "log_filename": "kafka_server_9091.log", "config_filename": "kafka_server_9091.properties" }, { "entity_id": "2", "port": "9092", "broker.id": "2", "log.segment.bytes": "102400", "log.dir": "/tmp/kafka_server_2_logs", "default.replication.factor": "3", ======================================================================= ==./system_test/replication_testsuite/testcase_0127/cluster_config.json ======================================================================= { "cluster_config": [ { "entity_id": "0", "hostname": "localhost", "role": "zookeeper", "cluster_name": "source", "kafka_home": "default", "java_home": "default", "jmx_port": "9990" }, { "entity_id": "1", "hostname": 
"localhost", "role": "broker", "cluster_name": "source", "kafka_home": "default", "java_home": "default", "jmx_port": "9991" }, { "entity_id": "2", "hostname": "localhost", "role": "broker", "cluster_name": "source", "kafka_home": "default", "java_home": "default", "jmx_port": "9992" }, { "entity_id": "3", "hostname": "localhost", "role": "broker", "cluster_name": "source", "kafka_home": "default", "java_home": "default", "jmx_port": "9993" }, { "entity_id": "4", "hostname": "localhost", "role": "producer_performance", "cluster_name": "source", "kafka_home": "default", "java_home": "default", "jmx_port": "9997" }, { "entity_id": "5", "hostname": "localhost", ======================================================================= ==./system_test/replication_testsuite/testcase_0127/testcase_0127_properties.json ======================================================================= { "description": {"01":"Leader Failure in Replication with multi topics & partitions : 1. ack => 1", "02":"Setting log.index.interval.bytes to slightly smaller than message size to force indexing on each message", "03":"Produce and consume messages to 2 topics - 3 partitions", "04":"This test sends messages to 3 replicas", "05":"To trigger leader election: find the leader and terminate by controlled failure (kill -15)", "06":"Restart the terminated broker", "07":"Lookup brokers' log4j messages and verify that leader is re-elected successfully", "08":"At the end it verifies the log size and contents", "09":"Use a consumer to verify no message loss.", "10":"Producer dimensions : mode:sync, acks:1, comp:1", "11":"Log segment size : 102400" }, "testcase_args": { "broker_type": "leader", "auto_create_topic": "true", "bounce_broker": "true", "replica_factor": "3", "num_partition": "3", "num_iteration": "3", "sleep_seconds_between_producer_calls": "1", "message_producing_free_time_sec": "15" }, "entities": [ { "entity_id": "0", "clientPort": "2188", "dataDir": "/tmp/zookeeper_0", "log_filename": 
"zookeeper_2188.log", "config_filename": "zookeeper_2188.properties" }, { "entity_id": "1", "port": "9091", "broker.id": "1", "log.segment.bytes": "102400", "log.dir": "/tmp/kafka_server_1_logs", "default.replication.factor": "3", "num.partitions": "3", "log.index.interval.bytes": "490", "log_filename": "kafka_server_9091.log", "config_filename": "kafka_server_9091.properties" }, { "entity_id": "2", "port": "9092", "broker.id": "2", "log.segment.bytes": "102400", "log.dir": "/tmp/kafka_server_2_logs", "default.replication.factor": "3", ======================================================================= ==./system_test/replication_testsuite/testcase_0128/testcase_0128_properties.json ======================================================================= { "description": {"01":"Leader Failure in Replication with multi topics & partitions : 1. auto_create_topic => true", "02":"Produce and consume messages to 2 topics - 3 partitions", "03":"This test sends messages to 3 replicas", "04":"To trigger leader election: find the leader and terminate by controlled failure (kill -15)", "05":"Restart the terminated broker", "06":"Lookup brokers' log4j messages and verify that leader is re-elected successfully", "07":"At the end it verifies the log size and contents", "08":"Use a consumer to verify no message loss.", "09":"Producer dimensions : mode:async, acks:-1, comp:1", "10":"Log segment size : 102400" }, "testcase_args": { "broker_type": "leader", "bounce_broker": "true", "replica_factor": "3", "num_partition": "3", "num_iteration": "3", "auto_create_topic": "true", "producer_multi_topics_mode": "true", "consumer_multi_topics_mode": "true", "sleep_seconds_between_producer_calls": "1", "message_producing_free_time_sec": "15" }, "entities": [ { "entity_id": "0", "clientPort": "2188", "dataDir": "/tmp/zookeeper_0", "log_filename": "zookeeper_2188.log", "config_filename": "zookeeper_2188.properties" }, { "entity_id": "1", "port": "9091", "broker.id": "1", "num.partitions": 
"3", "default.replication.factor": "3", "log.segment.bytes": "102400", "log.dir": "/tmp/kafka_server_1_logs", "log_filename": "kafka_server_9091.log", "config_filename": "kafka_server_9091.properties" }, { "entity_id": "2", "port": "9092", "broker.id": "2", "num.partitions": "3", "default.replication.factor": "3", "log.segment.bytes": "102400", ======================================================================= ==./system_test/replication_testsuite/testcase_0131/cluster_config.json ======================================================================= { "cluster_config": [ { "entity_id": "0", "hostname": "localhost", "role": "zookeeper", "cluster_name": "source", "kafka_home": "default", "java_home": "default", "jmx_port": "9990" }, { "entity_id": "1", "hostname": "localhost", "role": "broker", "cluster_name": "source", "kafka_home": "default", "java_home": "default", "jmx_port": "9991" }, { "entity_id": "2", "hostname": "localhost", "role": "broker", "cluster_name": "source", "kafka_home": "default", "java_home": "default", "jmx_port": "9992" }, { "entity_id": "3", "hostname": "localhost", "role": "broker", "cluster_name": "source", "kafka_home": "default", "java_home": "default", "jmx_port": "9993" }, { "entity_id": "4", "hostname": "localhost", "role": "producer_performance", "cluster_name": "source", "kafka_home": "default", "java_home": "default", "jmx_port": "9997" }, { "entity_id": "5", "hostname": "localhost", ======================================================================= ==./system_test/replication_testsuite/testcase_0131/testcase_0131_properties.json ======================================================================= { "description": {"01":"Leader Failure in Replication with multi topics & partitions : Base Test", "02":"Produce and consume messages to 2 topics - 3 partitions", "03":"This test sends messages to 2 replicas", "04":"To trigger leader election: find the leader and terminate by controlled failure (kill -15)", "05":"Restart the 
terminated broker", "06":"Lookup brokers' log4j messages and verify that leader is re-elected successfully", "07":"At the end it verifies the log size and contents", "08":"Use a consumer to verify no message loss.", "09":"Producer dimensions : mode:sync, acks:-1, comp:0", "10":"Log segment size : 102400" }, "testcase_args": { "broker_type": "leader", "auto_create_topic": "true", "bounce_broker": "true", "replica_factor": "2", "num_partition": "3", "num_iteration": "3", "sleep_seconds_between_producer_calls": "1", "message_producing_free_time_sec": "15" }, "entities": [ { "entity_id": "0", "clientPort": "2188", "dataDir": "/tmp/zookeeper_0", "log_filename": "zookeeper_2188.log", "config_filename": "zookeeper_2188.properties" }, { "entity_id": "1", "port": "9091", "broker.id": "1", "log.segment.bytes": "102400", "log.dir": "/tmp/kafka_server_1_logs", "default.replication.factor": "2", "num.partitions": "3", "log.index.interval.bytes": "10", "log_filename": "kafka_server_9091.log", "config_filename": "kafka_server_9091.properties" }, { "entity_id": "2", "port": "9092", "broker.id": "2", "log.segment.bytes": "102400", "log.dir": "/tmp/kafka_server_2_logs", "default.replication.factor": "2", "num.partitions": "3", ======================================================================= ==./system_test/replication_testsuite/testcase_0132/cluster_config.json ======================================================================= { "cluster_config": [ { "entity_id": "0", "hostname": "localhost", "role": "zookeeper", "cluster_name": "source", "kafka_home": "default", "java_home": "default", "jmx_port": "9990" }, { "entity_id": "1", "hostname": "localhost", "role": "broker", "cluster_name": "source", "kafka_home": "default", "java_home": "default", "jmx_port": "9991" }, { "entity_id": "2", "hostname": "localhost", "role": "broker", "cluster_name": "source", "kafka_home": "default", "java_home": "default", "jmx_port": "9992" }, { "entity_id": "3", "hostname": "localhost", 
"role": "broker", "cluster_name": "source", "kafka_home": "default", "java_home": "default", "jmx_port": "9993" }, { "entity_id": "4", "hostname": "localhost", "role": "producer_performance", "cluster_name": "source", "kafka_home": "default", "java_home": "default", "jmx_port": "9997" }, { "entity_id": "5", "hostname": "localhost", ======================================================================= ==./system_test/replication_testsuite/testcase_0132/testcase_0132_properties.json ======================================================================= { "description": {"01":"Leader Failure in Replication with multi topics & partitions : 1. acks => 1", "02":"Produce and consume messages to 2 topics - 3 partitions", "03":"This test sends messages to 2 replicas", "04":"To trigger leader election: find the leader and terminate by controlled failure (kill -15)", "05":"Restart the terminated broker", "06":"Lookup brokers' log4j messages and verify that leader is re-elected successfully", "07":"At the end it verifies the log size and contents", "08":"Use a consumer to verify no message loss.", "09":"Producer dimensions : mode:sync, acks:1, comp:0", "10":"Log segment size : 512000" }, "testcase_args": { "broker_type": "leader", "auto_create_topic": "true", "bounce_broker": "true", "replica_factor": "2", "num_partition": "3", "num_iteration": "3", "sleep_seconds_between_producer_calls": "1", "message_producing_free_time_sec": "15" }, "entities": [ { "entity_id": "0", "clientPort": "2188", "dataDir": "/tmp/zookeeper_0", "log_filename": "zookeeper_2188.log", "config_filename": "zookeeper_2188.properties" }, { "entity_id": "1", "port": "9091", "broker.id": "1", "log.segment.bytes": "512000", "log.dir": "/tmp/kafka_server_1_logs", "default.replication.factor": "2", "num.partitions": "3", "log_filename": "kafka_server_9091.log", "config_filename": "kafka_server_9091.properties" }, { "entity_id": "2", "port": "9092", "broker.id": "2", "log.segment.bytes": "512000", "log.dir": 
"/tmp/kafka_server_2_logs", "default.replication.factor": "2", "num.partitions": "3", "log_filename": "kafka_server_9092.log", ======================================================================= ==./system_test/replication_testsuite/testcase_0133/cluster_config.json ======================================================================= { "cluster_config": [ { "entity_id": "0", "hostname": "localhost", "role": "zookeeper", "cluster_name": "source", "kafka_home": "default", "java_home": "default", "jmx_port": "9990" }, { "entity_id": "1", "hostname": "localhost", "role": "broker", "cluster_name": "source", "kafka_home": "default", "java_home": "default", "jmx_port": "9991" }, { "entity_id": "2", "hostname": "localhost", "role": "broker", "cluster_name": "source", "kafka_home": "default", "java_home": "default", "jmx_port": "9992" }, { "entity_id": "3", "hostname": "localhost", "role": "broker", "cluster_name": "source", "kafka_home": "default", "java_home": "default", "jmx_port": "9993" }, { "entity_id": "4", "hostname": "localhost", "role": "producer_performance", "cluster_name": "source", "kafka_home": "default", "java_home": "default", "jmx_port": "9997" }, { "entity_id": "5", "hostname": "localhost", ======================================================================= ==./system_test/replication_testsuite/testcase_0133/testcase_0133_properties.json ======================================================================= { "description": {"01":"Leader Failure in Replication with multi topics & partitions : 1. mode => async; 2. 
comp => 1", "02":"Produce and consume messages to 2 topics - 3 partitions", "03":"This test sends messages to 2 replicas", "04":"To trigger leader election: find the leader and terminate by controlled failure (kill -15)", "05":"Restart the terminated broker", "06":"Lookup brokers' log4j messages and verify that leader is re-elected successfully", "07":"At the end it verifies the log size and contents", "08":"Use a consumer to verify no message loss.", "09":"Producer dimensions : mode:async, acks:1, comp:1", "10":"Log segment size : 512000" }, "testcase_args": { "broker_type": "leader", "auto_create_topic": "true", "bounce_broker": "true", "replica_factor": "2", "num_partition": "3", "num_iteration": "3", "sleep_seconds_between_producer_calls": "1", "message_producing_free_time_sec": "15" }, "entities": [ { "entity_id": "0", "clientPort": "2188", "dataDir": "/tmp/zookeeper_0", "log_filename": "zookeeper_2188.log", "config_filename": "zookeeper_2188.properties" }, { "entity_id": "1", "port": "9091", "broker.id": "1", "log.segment.bytes": "512000", "log.dir": "/tmp/kafka_server_1_logs", "default.replication.factor": "2", "num.partitions": "3", "log_filename": "kafka_server_9091.log", "config_filename": "kafka_server_9091.properties" }, { "entity_id": "2", "port": "9092", "broker.id": "2", "log.segment.bytes": "512000", "log.dir": "/tmp/kafka_server_2_logs", "default.replication.factor": "2", "num.partitions": "3", "log_filename": "kafka_server_9092.log", ======================================================================= ==./system_test/replication_testsuite/testcase_0134/testcase_0134_properties.json ======================================================================= { "description": {"01":"Leader Failure in Replication with multi topics & partitions : 1. 
auto_create_topic => true", "02":"Produce and consume messages to 2 topics - 3 partitions", "03":"This test sends messages to 2 replicas", "04":"To trigger leader election: find the leader and terminate by controlled failure (kill -15)", "05":"Restart the terminated broker", "06":"Lookup brokers' log4j messages and verify that leader is re-elected successfully", "07":"At the end it verifies the log size and contents", "08":"Use a consumer to verify no message loss.", "09":"Producer dimensions : mode:sync, acks:-1, comp:0", "10":"Log segment size : 102400" }, "testcase_args": { "broker_type": "leader", "bounce_broker": "true", "replica_factor": "2", "num_partition": "3", "num_iteration": "3", "auto_create_topic": "true", "producer_multi_topics_mode": "true", "consumer_multi_topics_mode": "true", "sleep_seconds_between_producer_calls": "1", "message_producing_free_time_sec": "15" }, "entities": [ { "entity_id": "0", "clientPort": "2188", "dataDir": "/tmp/zookeeper_0", "log_filename": "zookeeper_2188.log", "config_filename": "zookeeper_2188.properties" }, { "entity_id": "1", "port": "9091", "broker.id": "1", "num.partitions": "3", "default.replication.factor": "2", "log.segment.bytes": "102400", "log.dir": "/tmp/kafka_server_1_logs", "log.index.interval.bytes": "10", "log_filename": "kafka_server_9091.log", "config_filename": "kafka_server_9091.properties" }, { "entity_id": "2", "port": "9092", "broker.id": "2", "num.partitions": "3", "default.replication.factor": "2", ======================================================================= ==./system_test/replication_testsuite/testcase_0151/testcase_0151_properties.json ======================================================================= { "description": {"01":"Multi Leader Failures (SIGKILL) in Replication : Base Test", "02":"Produce and consume messages to a single topic - three partition.", "03":"This test sends messages to 3 replicas", "04":"To trigger leader election: find the leader and terminate by 
hard failure (kill -9)", "05":"Restart the terminated broker", "06":"Lookup brokers' log4j messages and verify that leader is re-elected successfully", "07":"At the end it verifies the log size and contents", "08":"Use a consumer to verify no message loss.", "09":"Producer dimensions : mode:sync, acks:-1, comp:0", "10":"Log segment size : 102400" }, "testcase_args": { "broker_type": "leader", "auto_create_topic": "true", "bounce_broker": "true", "signal_type": "SIGKILL", "replica_factor": "3", "num_partition": "3", "num_iteration": "3", "sleep_seconds_between_producer_calls": "1", "message_producing_free_time_sec": "15", "num_messages_to_produce_per_producer_call": "50" }, "entities": [ { "entity_id": "0", "clientPort": "2188", "dataDir": "/tmp/zookeeper_0", "log_filename": "zookeeper_2188.log", "config_filename": "zookeeper_2188.properties" }, { "entity_id": "1", "port": "9091", "broker.id": "1", "log.segment.bytes": "102400", "log.dir": "/tmp/kafka_server_1_logs", "default.replication.factor": "3", "num.partitions": "3", "log_filename": "kafka_server_9091.log", "config_filename": "kafka_server_9091.properties" }, { "entity_id": "2", "port": "9092", "broker.id": "2", "log.segment.bytes": "102400", "log.dir": "/tmp/kafka_server_2_logs", "default.replication.factor": "3", ======================================================================= ==./system_test/replication_testsuite/testcase_0152/testcase_0152_properties.json ======================================================================= { "description": {"01":"Multi Leader Failures (SIGKILL) in Replication : 1. 
mode => async", "02":"Produce and consume messages to a single topic - three partition.", "03":"This test sends messages to 3 replicas", "04":"To trigger leader election: find the leader and terminate by hard failure (kill -9)", "05":"Restart the terminated broker", "06":"Lookup brokers' log4j messages and verify that leader is re-elected successfully", "07":"At the end it verifies the log size and contents", "08":"Use a consumer to verify no message loss.", "09":"Producer dimensions : mode:async, acks:-1, comp:0", "10":"Log segment size : 102400" }, "testcase_args": { "broker_type": "leader", "auto_create_topic": "true", "bounce_broker": "true", "signal_type": "SIGKILL", "replica_factor": "3", "num_partition": "3", "num_iteration": "3", "sleep_seconds_between_producer_calls": "1", "message_producing_free_time_sec": "15", "num_messages_to_produce_per_producer_call": "50" }, "entities": [ { "entity_id": "0", "clientPort": "2188", "dataDir": "/tmp/zookeeper_0", "log_filename": "zookeeper_2188.log", "config_filename": "zookeeper_2188.properties" }, { "entity_id": "1", "port": "9091", "broker.id": "1", "log.segment.bytes": "102400", "log.dir": "/tmp/kafka_server_1_logs", "default.replication.factor": "3", "num.partitions": "3", "log_filename": "kafka_server_9091.log", "config_filename": "kafka_server_9091.properties" }, { "entity_id": "2", "port": "9092", "broker.id": "2", "log.segment.bytes": "102400", "log.dir": "/tmp/kafka_server_2_logs", "default.replication.factor": "3", ======================================================================= ==./system_test/replication_testsuite/testcase_0153/testcase_0153_properties.json ======================================================================= { "description": {"01":"Multi Leader Failures (SIGKILL) in Replication : 1. 
acks => 1", "02":"Produce and consume messages to a single topic - three partition.", "03":"This test sends messages to 3 replicas", "04":"To trigger leader election: find the leader and terminate by hard failure (kill -9)", "05":"Restart the terminated broker", "06":"Lookup brokers' log4j messages and verify that leader is re-elected successfully", "07":"At the end it verifies the log size and contents", "08":"Use a consumer to verify no message loss.", "09":"Producer dimensions : mode:sync, acks:1, comp:0", "10":"Log segment size : 102400" }, "testcase_args": { "broker_type": "leader", "auto_create_topic": "true", "bounce_broker": "true", "signal_type": "SIGKILL", "replica_factor": "3", "num_partition": "3", "num_iteration": "3", "sleep_seconds_between_producer_calls": "1", "message_producing_free_time_sec": "15", "num_messages_to_produce_per_producer_call": "50" }, "entities": [ { "entity_id": "0", "clientPort": "2188", "dataDir": "/tmp/zookeeper_0", "log_filename": "zookeeper_2188.log", "config_filename": "zookeeper_2188.properties" }, { "entity_id": "1", "port": "9091", "broker.id": "1", "log.segment.bytes": "102400", "log.dir": "/tmp/kafka_server_1_logs", "default.replication.factor": "3", "num.partitions": "3", "log_filename": "kafka_server_9091.log", "config_filename": "kafka_server_9091.properties" }, { "entity_id": "2", "port": "9092", "broker.id": "2", "log.segment.bytes": "102400", "log.dir": "/tmp/kafka_server_2_logs", "default.replication.factor": "3", ======================================================================= ==./system_test/replication_testsuite/testcase_0154/testcase_0154_properties.json ======================================================================= { "description": {"01":"Multi Leader Failures (SIGKILL) in Replication : 1. mode => async; 2. 
acks => 1", "02":"Produce and consume messages to a single topic - three partition.", "03":"This test sends messages to 3 replicas", "04":"To trigger leader election: find the leader and terminate by hard failure (kill -9)", "05":"Restart the terminated broker", "06":"Lookup brokers' log4j messages and verify that leader is re-elected successfully", "07":"At the end it verifies the log size and contents", "08":"Use a consumer to verify no message loss.", "09":"Producer dimensions : mode:async, acks:1, comp:0", "10":"Log segment size : 102400" }, "testcase_args": { "broker_type": "leader", "auto_create_topic": "true", "bounce_broker": "true", "signal_type": "SIGKILL", "replica_factor": "3", "num_partition": "3", "num_iteration": "3", "sleep_seconds_between_producer_calls": "1", "message_producing_free_time_sec": "15", "num_messages_to_produce_per_producer_call": "50" }, "entities": [ { "entity_id": "0", "clientPort": "2188", "dataDir": "/tmp/zookeeper_0", "log_filename": "zookeeper_2188.log", "config_filename": "zookeeper_2188.properties" }, { "entity_id": "1", "port": "9091", "broker.id": "1", "log.segment.bytes": "102400", "log.dir": "/tmp/kafka_server_1_logs", "default.replication.factor": "3", "num.partitions": "3", "log_filename": "kafka_server_9091.log", "config_filename": "kafka_server_9091.properties" }, { "entity_id": "2", "port": "9092", "broker.id": "2", "log.segment.bytes": "102400", "log.dir": "/tmp/kafka_server_2_logs", "default.replication.factor": "3", ======================================================================= ==./system_test/replication_testsuite/testcase_0155/testcase_0155_properties.json ======================================================================= { "description": {"01":"Multi Leader Failures (SIGKILL) in Replication : 1. 
comp => 1", "02":"Produce and consume messages to a single topic - three partition.", "03":"This test sends messages to 3 replicas", "04":"To trigger leader election: find the leader and terminate by hard failure (kill -9)", "05":"Restart the terminated broker", "06":"Lookup brokers' log4j messages and verify that leader is re-elected successfully", "07":"At the end it verifies the log size and contents", "08":"Use a consumer to verify no message loss.", "09":"Producer dimensions : mode:sync, acks:-1, comp:1", "10":"Log segment size : 102400" }, "testcase_args": { "broker_type": "leader", "auto_create_topic": "true", "bounce_broker": "true", "signal_type": "SIGKILL", "replica_factor": "3", "num_partition": "3", "num_iteration": "3", "sleep_seconds_between_producer_calls": "1", "message_producing_free_time_sec": "15", "num_messages_to_produce_per_producer_call": "50" }, "entities": [ { "entity_id": "0", "clientPort": "2188", "dataDir": "/tmp/zookeeper_0", "log_filename": "zookeeper_2188.log", "config_filename": "zookeeper_2188.properties" }, { "entity_id": "1", "port": "9091", "broker.id": "1", "log.segment.bytes": "102400", "log.dir": "/tmp/kafka_server_1_logs", "default.replication.factor": "3", "num.partitions": "3", "log_filename": "kafka_server_9091.log", "config_filename": "kafka_server_9091.properties" }, { "entity_id": "2", "port": "9092", "broker.id": "2", "log.segment.bytes": "102400", "log.dir": "/tmp/kafka_server_2_logs", "default.replication.factor": "3", ======================================================================= ==./system_test/replication_testsuite/testcase_0156/testcase_0156_properties.json ======================================================================= { "description": {"01":"Multi Leader Failures (SIGKILL) in Replication : 1. mode => async; 2. 
comp => 1", "02":"Produce and consume messages to a single topic - three partition.", "03":"This test sends messages to 3 replicas", "04":"To trigger leader election: find the leader and terminate by hard failure (kill -9)", "05":"Restart the terminated broker", "06":"Lookup brokers' log4j messages and verify that leader is re-elected successfully", "07":"At the end it verifies the log size and contents", "08":"Use a consumer to verify no message loss.", "09":"Producer dimensions : mode:async, acks:-1, comp:1", "10":"Log segment size : 102400" }, "testcase_args": { "broker_type": "leader", "auto_create_topic": "true", "bounce_broker": "true", "signal_type": "SIGKILL", "replica_factor": "3", "num_partition": "3", "num_iteration": "3", "sleep_seconds_between_producer_calls": "1", "message_producing_free_time_sec": "15", "num_messages_to_produce_per_producer_call": "50" }, "entities": [ { "entity_id": "0", "clientPort": "2188", "dataDir": "/tmp/zookeeper_0", "log_filename": "zookeeper_2188.log", "config_filename": "zookeeper_2188.properties" }, { "entity_id": "1", "port": "9091", "broker.id": "1", "log.segment.bytes": "102400", "log.dir": "/tmp/kafka_server_1_logs", "default.replication.factor": "3", "num.partitions": "3", "log_filename": "kafka_server_9091.log", "config_filename": "kafka_server_9091.properties" }, { "entity_id": "2", "port": "9092", "broker.id": "2", "log.segment.bytes": "102400", "log.dir": "/tmp/kafka_server_2_logs", "default.replication.factor": "3", ======================================================================= ==./system_test/replication_testsuite/testcase_0157/testcase_0157_properties.json ======================================================================= { "description": {"01":"Multi Leader Failures (SIGKILL) in Replication : 1. acks => 1; 2. 
comp => 1", "02":"Produce and consume messages to a single topic - three partition.", "03":"This test sends messages to 3 replicas", "04":"To trigger leader election: find the leader and terminate by hard failure (kill -9)", "05":"Restart the terminated broker", "06":"Lookup brokers' log4j messages and verify that leader is re-elected successfully", "07":"At the end it verifies the log size and contents", "08":"Use a consumer to verify no message loss.", "09":"Producer dimensions : mode:sync, acks:1, comp:1", "10":"Log segment size : 102400" }, "testcase_args": { "broker_type": "leader", "auto_create_topic": "true", "bounce_broker": "true", "signal_type": "SIGKILL", "replica_factor": "3", "num_partition": "3", "num_iteration": "3", "sleep_seconds_between_producer_calls": "1", "message_producing_free_time_sec": "15", "num_messages_to_produce_per_producer_call": "50" }, "entities": [ { "entity_id": "0", "clientPort": "2188", "dataDir": "/tmp/zookeeper_0", "log_filename": "zookeeper_2188.log", "config_filename": "zookeeper_2188.properties" }, { "entity_id": "1", "port": "9091", "broker.id": "1", "log.segment.bytes": "102400", "log.dir": "/tmp/kafka_server_1_logs", "default.replication.factor": "3", "num.partitions": "3", "log_filename": "kafka_server_9091.log", "config_filename": "kafka_server_9091.properties" }, { "entity_id": "2", "port": "9092", "broker.id": "2", "log.segment.bytes": "102400", "log.dir": "/tmp/kafka_server_2_logs", "default.replication.factor": "3", ======================================================================= ==./system_test/replication_testsuite/testcase_0158/testcase_0158_properties.json ======================================================================= { "description": {"01":"Multi Leader Failures (SIGKILL) in Replication : 1. mode => async; 2. acks => 1; 3. 
comp => 1", "02":"Produce and consume messages to a single topic - three partition.", "03":"This test sends messages to 3 replicas", "04":"To trigger leader election: find the leader and terminate by hard failure (kill -9)", "05":"Restart the terminated broker", "06":"Lookup brokers' log4j messages and verify that leader is re-elected successfully", "07":"At the end it verifies the log size and contents", "08":"Use a consumer to verify no message loss.", "09":"Producer dimensions : mode:async, acks:1, comp:1", "10":"Log segment size : 102400" }, "testcase_args": { "broker_type": "leader", "auto_create_topic": "true", "bounce_broker": "true", "signal_type": "SIGKILL", "replica_factor": "3", "num_partition": "3", "num_iteration": "3", "sleep_seconds_between_producer_calls": "1", "message_producing_free_time_sec": "15", "num_messages_to_produce_per_producer_call": "50" }, "entities": [ { "entity_id": "0", "clientPort": "2188", "dataDir": "/tmp/zookeeper_0", "log_filename": "zookeeper_2188.log", "config_filename": "zookeeper_2188.properties" }, { "entity_id": "1", "port": "9091", "broker.id": "1", "log.segment.bytes": "102400", "log.dir": "/tmp/kafka_server_1_logs", "default.replication.factor": "3", "num.partitions": "3", "log_filename": "kafka_server_9091.log", "config_filename": "kafka_server_9091.properties" }, { "entity_id": "2", "port": "9092", "broker.id": "2", "log.segment.bytes": "102400", "log.dir": "/tmp/kafka_server_2_logs", "default.replication.factor": "3", ======================================================================= ==./system_test/replication_testsuite/testcase_0159/testcase_0159_properties.json ======================================================================= { "description": {"01":"Multi Leader Failures (SIGKILL) in Replication : 1. 
auto_create_topic => true", "02":"Produce and consume messages to a single topic - three partition.", "03":"This test sends messages to 3 replicas", "04":"To trigger leader election: find the leader and terminate by hard failure (kill -9)", "05":"Restart the terminated broker", "06":"Lookup brokers' log4j messages and verify that leader is re-elected successfully", "07":"At the end it verifies the log size and contents", "08":"Use a consumer to verify no message loss.", "09":"Producer dimensions : mode:sync, acks:-1, comp:0", "10":"Log segment size : 102400" }, "testcase_args": { "broker_type": "leader", "bounce_broker": "true", "signal_type": "SIGKILL", "replica_factor": "3", "num_partition": "3", "num_iteration": "3", "auto_create_topic": "true", "sleep_seconds_between_producer_calls": "1", "message_producing_free_time_sec": "15", "num_messages_to_produce_per_producer_call": "50" }, "entities": [ { "entity_id": "0", "clientPort": "2188", "dataDir": "/tmp/zookeeper_0", "log_filename": "zookeeper_2188.log", "config_filename": "zookeeper_2188.properties" }, { "entity_id": "1", "port": "9091", "broker.id": "1", "num.partitions": "3", "default.replication.factor": "3", "log.segment.bytes": "102400", "log.dir": "/tmp/kafka_server_1_logs", "log_filename": "kafka_server_9091.log", "config_filename": "kafka_server_9091.properties" }, { "entity_id": "2", "port": "9092", "broker.id": "2", "num.partitions": "3", "default.replication.factor": "3", "log.segment.bytes": "102400", ======================================================================= ==./system_test/replication_testsuite/testcase_0201/testcase_0201_properties.json ======================================================================= { "description": {"01":"Multi Controller Failures (SIGTERM) in Replication : Base Test", "02":"Produce and consume messages to a single topic - three partition.", "03":"This test sends messages to 3 replicas", "04":"To trigger leader election: find the leader and terminate 
by controlled failure (kill -15)", "05":"Restart the terminated broker", "06":"Lookup brokers' log4j messages and verify that leader is re-elected successfully", "07":"At the end it verifies the log size and contents", "08":"Use a consumer to verify no message loss.", "09":"Producer dimensions : mode:sync, acks:-1, comp:0", "10":"Log segment size : 102400" }, "testcase_args": { "broker_type": "controller", "bounce_broker": "true", "signal_type": "SIGTERM", "replica_factor": "3", "num_partition": "3", "num_iteration": "3", "sleep_seconds_between_producer_calls": "1", "message_producing_free_time_sec": "15", "num_messages_to_produce_per_producer_call": "50" }, "entities": [ { "entity_id": "0", "clientPort": "2188", "dataDir": "/tmp/zookeeper_0", "log_filename": "zookeeper_2188.log", "config_filename": "zookeeper_2188.properties" }, { "entity_id": "1", "port": "9091", "broker.id": "1", "log.segment.bytes": "102400", "log.dir": "/tmp/kafka_server_1_logs", "default.replication.factor": "3", "num.partitions": "3", "log_filename": "kafka_server_9091.log", "config_filename": "kafka_server_9091.properties" }, { "entity_id": "2", "port": "9092", "broker.id": "2", "log.segment.bytes": "102400", "log.dir": "/tmp/kafka_server_2_logs", "default.replication.factor": "3", "num.partitions": "3", ======================================================================= ==./system_test/replication_testsuite/testcase_0202/testcase_0202_properties.json ======================================================================= { "description": {"01":"Multi Controller Failures (SIGTERM) in Replication : 1. 
mode => async", "02":"Produce and consume messages to a single topic - three partition.", "03":"This test sends messages to 3 replicas", "04":"To trigger leader election: find the leader and terminate by controlled failure (kill -15)", "05":"Restart the terminated broker", "06":"Lookup brokers' log4j messages and verify that leader is re-elected successfully", "07":"At the end it verifies the log size and contents", "08":"Use a consumer to verify no message loss.", "09":"Producer dimensions : mode:async, acks:-1, comp:0", "10":"Log segment size : 102400" }, "testcase_args": { "broker_type": "controller", "bounce_broker": "true", "signal_type": "SIGTERM", "replica_factor": "3", "num_partition": "3", "num_iteration": "3", "sleep_seconds_between_producer_calls": "1", "message_producing_free_time_sec": "15", "num_messages_to_produce_per_producer_call": "50" }, "entities": [ { "entity_id": "0", "clientPort": "2188", "dataDir": "/tmp/zookeeper_0", "log_filename": "zookeeper_2188.log", "config_filename": "zookeeper_2188.properties" }, { "entity_id": "1", "port": "9091", "broker.id": "1", "log.segment.bytes": "102400", "log.dir": "/tmp/kafka_server_1_logs", "default.replication.factor": "3", "num.partitions": "3", "log_filename": "kafka_server_9091.log", "config_filename": "kafka_server_9091.properties" }, { "entity_id": "2", "port": "9092", "broker.id": "2", "log.segment.bytes": "102400", "log.dir": "/tmp/kafka_server_2_logs", "default.replication.factor": "3", "num.partitions": "3", ======================================================================= ==./system_test/replication_testsuite/testcase_0203/testcase_0203_properties.json ======================================================================= { "description": {"01":"Multi Controller Failures (SIGTERM) in Replication : 1. 
acks => 1", "02":"Produce and consume messages to a single topic - three partition.", "03":"This test sends messages to 3 replicas", "04":"To trigger leader election: find the leader and terminate by controlled failure (kill -15)", "05":"Restart the terminated broker", "06":"Lookup brokers' log4j messages and verify that leader is re-elected successfully", "07":"At the end it verifies the log size and contents", "08":"Use a consumer to verify no message loss.", "09":"Producer dimensions : mode:sync, acks:1, comp:0", "10":"Log segment size : 102400" }, "testcase_args": { "broker_type": "controller", "bounce_broker": "true", "signal_type": "SIGTERM", "replica_factor": "3", "num_partition": "3", "num_iteration": "3", "sleep_seconds_between_producer_calls": "1", "message_producing_free_time_sec": "15", "num_messages_to_produce_per_producer_call": "50" }, "entities": [ { "entity_id": "0", "clientPort": "2188", "dataDir": "/tmp/zookeeper_0", "log_filename": "zookeeper_2188.log", "config_filename": "zookeeper_2188.properties" }, { "entity_id": "1", "port": "9091", "broker.id": "1", "log.segment.bytes": "102400", "log.dir": "/tmp/kafka_server_1_logs", "default.replication.factor": "3", "num.partitions": "3", "log_filename": "kafka_server_9091.log", "config_filename": "kafka_server_9091.properties" }, { "entity_id": "2", "port": "9092", "broker.id": "2", "log.segment.bytes": "102400", "log.dir": "/tmp/kafka_server_2_logs", "default.replication.factor": "3", "num.partitions": "3", ======================================================================= ==./system_test/replication_testsuite/testcase_0204/testcase_0204_properties.json ======================================================================= { "description": {"01":"Multi Controller Failures (SIGTERM) in Replication : 1. mode => async; 2. 
acks => 1", "02":"Produce and consume messages to a single topic - three partition.", "03":"This test sends messages to 3 replicas", "04":"To trigger leader election: find the leader and terminate by controlled failure (kill -15)", "05":"Restart the terminated broker", "06":"Lookup brokers' log4j messages and verify that leader is re-elected successfully", "07":"At the end it verifies the log size and contents", "08":"Use a consumer to verify no message loss.", "09":"Producer dimensions : mode:async, acks:1, comp:0", "10":"Log segment size : 102400" }, "testcase_args": { "broker_type": "controller", "bounce_broker": "true", "signal_type": "SIGTERM", "replica_factor": "3", "num_partition": "3", "num_iteration": "3", "sleep_seconds_between_producer_calls": "1", "message_producing_free_time_sec": "15", "num_messages_to_produce_per_producer_call": "50" }, "entities": [ { "entity_id": "0", "clientPort": "2188", "dataDir": "/tmp/zookeeper_0", "log_filename": "zookeeper_2188.log", "config_filename": "zookeeper_2188.properties" }, { "entity_id": "1", "port": "9091", "broker.id": "1", "log.segment.bytes": "102400", "log.dir": "/tmp/kafka_server_1_logs", "default.replication.factor": "3", "num.partitions": "3", "log_filename": "kafka_server_9091.log", "config_filename": "kafka_server_9091.properties" }, { "entity_id": "2", "port": "9092", "broker.id": "2", "log.segment.bytes": "102400", "log.dir": "/tmp/kafka_server_2_logs", "default.replication.factor": "3", "num.partitions": "3", ======================================================================= ==./system_test/replication_testsuite/testcase_0205/testcase_0205_properties.json ======================================================================= { "description": {"01":"Multi Controller Failures (SIGTERM) in Replication : 1. 
comp => 1", "02":"Produce and consume messages to a single topic - three partition.", "03":"This test sends messages to 3 replicas", "04":"To trigger leader election: find the leader and terminate by controlled failure (kill -15)", "05":"Restart the terminated broker", "06":"Lookup brokers' log4j messages and verify that leader is re-elected successfully", "07":"At the end it verifies the log size and contents", "08":"Use a consumer to verify no message loss.", "09":"Producer dimensions : mode:sync, acks:-1, comp:1", "10":"Log segment size : 102400" }, "testcase_args": { "broker_type": "controller", "bounce_broker": "true", "signal_type": "SIGTERM", "replica_factor": "3", "num_partition": "3", "num_iteration": "3", "sleep_seconds_between_producer_calls": "1", "message_producing_free_time_sec": "15", "num_messages_to_produce_per_producer_call": "50" }, "entities": [ { "entity_id": "0", "clientPort": "2188", "dataDir": "/tmp/zookeeper_0", "log_filename": "zookeeper_2188.log", "config_filename": "zookeeper_2188.properties" }, { "entity_id": "1", "port": "9091", "broker.id": "1", "log.segment.bytes": "102400", "log.dir": "/tmp/kafka_server_1_logs", "default.replication.factor": "3", "num.partitions": "3", "log_filename": "kafka_server_9091.log", "config_filename": "kafka_server_9091.properties" }, { "entity_id": "2", "port": "9092", "broker.id": "2", "log.segment.bytes": "102400", "log.dir": "/tmp/kafka_server_2_logs", "default.replication.factor": "3", "num.partitions": "3", ======================================================================= ==./system_test/replication_testsuite/testcase_0206/testcase_0206_properties.json ======================================================================= { "description": {"01":"Multi Controller Failures (SIGTERM) in Replication : 1. mode => async; 2. 
comp => 1", "02":"Produce and consume messages to a single topic - three partition.", "03":"This test sends messages to 3 replicas", "04":"To trigger leader election: find the leader and terminate by controlled failure (kill -15)", "05":"Restart the terminated broker", "06":"Lookup brokers' log4j messages and verify that leader is re-elected successfully", "07":"At the end it verifies the log size and contents", "08":"Use a consumer to verify no message loss.", "09":"Producer dimensions : mode:async, acks:-1, comp:1", "10":"Log segment size : 102400" }, "testcase_args": { "broker_type": "controller", "bounce_broker": "true", "signal_type": "SIGTERM", "replica_factor": "3", "num_partition": "3", "num_iteration": "3", "sleep_seconds_between_producer_calls": "1", "message_producing_free_time_sec": "15", "num_messages_to_produce_per_producer_call": "50" }, "entities": [ { "entity_id": "0", "clientPort": "2188", "dataDir": "/tmp/zookeeper_0", "log_filename": "zookeeper_2188.log", "config_filename": "zookeeper_2188.properties" }, { "entity_id": "1", "port": "9091", "broker.id": "1", "log.segment.bytes": "102400", "log.dir": "/tmp/kafka_server_1_logs", "default.replication.factor": "3", "num.partitions": "3", "log_filename": "kafka_server_9091.log", "config_filename": "kafka_server_9091.properties" }, { "entity_id": "2", "port": "9092", "broker.id": "2", "log.segment.bytes": "102400", "log.dir": "/tmp/kafka_server_2_logs", "default.replication.factor": "3", "num.partitions": "3", ======================================================================= ==./system_test/replication_testsuite/testcase_0207/testcase_0207_properties.json ======================================================================= { "description": {"01":"Multi Controller Failures (SIGTERM) in Replication : 1. acks => 1; 2. 
comp => 1", "02":"Produce and consume messages to a single topic - three partition.", "03":"This test sends messages to 3 replicas", "04":"To trigger leader election: find the leader and terminate by controlled failure (kill -15)", "05":"Restart the terminated broker", "06":"Lookup brokers' log4j messages and verify that leader is re-elected successfully", "07":"At the end it verifies the log size and contents", "08":"Use a consumer to verify no message loss.", "09":"Producer dimensions : mode:sync, acks:1, comp:1", "10":"Log segment size : 102400" }, "testcase_args": { "broker_type": "controller", "bounce_broker": "true", "signal_type": "SIGTERM", "replica_factor": "3", "num_partition": "3", "num_iteration": "3", "sleep_seconds_between_producer_calls": "1", "message_producing_free_time_sec": "15", "num_messages_to_produce_per_producer_call": "50" }, "entities": [ { "entity_id": "0", "clientPort": "2188", "dataDir": "/tmp/zookeeper_0", "log_filename": "zookeeper_2188.log", "config_filename": "zookeeper_2188.properties" }, { "entity_id": "1", "port": "9091", "broker.id": "1", "log.segment.bytes": "102400", "log.dir": "/tmp/kafka_server_1_logs", "default.replication.factor": "3", "num.partitions": "3", "log_filename": "kafka_server_9091.log", "config_filename": "kafka_server_9091.properties" }, { "entity_id": "2", "port": "9092", "broker.id": "2", "log.segment.bytes": "102400", "log.dir": "/tmp/kafka_server_2_logs", "default.replication.factor": "3", "num.partitions": "3", ======================================================================= ==./system_test/replication_testsuite/testcase_0208/testcase_0208_properties.json ======================================================================= { "description": {"01":"Multi Controller Failures (SIGTERM) in Replication : 1. mode => async; 2. acks => 1; 3. 
comp => 1", "02":"Produce and consume messages to a single topic - three partition.", "03":"This test sends messages to 3 replicas", "04":"To trigger leader election: find the leader and terminate by controlled failure (kill -15)", "05":"Restart the terminated broker", "06":"Lookup brokers' log4j messages and verify that leader is re-elected successfully", "07":"At the end it verifies the log size and contents", "08":"Use a consumer to verify no message loss.", "09":"Producer dimensions : mode:async, acks:1, comp:1", "10":"Log segment size : 102400" }, "testcase_args": { "broker_type": "controller", "bounce_broker": "true", "signal_type": "SIGTERM", "replica_factor": "3", "num_partition": "3", "num_iteration": "3", "sleep_seconds_between_producer_calls": "1", "message_producing_free_time_sec": "15", "num_messages_to_produce_per_producer_call": "50" }, "entities": [ { "entity_id": "0", "clientPort": "2188", "dataDir": "/tmp/zookeeper_0", "log_filename": "zookeeper_2188.log", "config_filename": "zookeeper_2188.properties" }, { "entity_id": "1", "port": "9091", "broker.id": "1", "log.segment.bytes": "102400", "log.dir": "/tmp/kafka_server_1_logs", "default.replication.factor": "3", "num.partitions": "3", "log_filename": "kafka_server_9091.log", "config_filename": "kafka_server_9091.properties" }, { "entity_id": "2", "port": "9092", "broker.id": "2", "log.segment.bytes": "102400", "log.dir": "/tmp/kafka_server_2_logs", "default.replication.factor": "3", "num.partitions": "3", ======================================================================= ==./system_test/replication_testsuite/testcase_0209/testcase_0209_properties.json ======================================================================= { "description": {"01":"Multi Controller Failures (SIGTERM) in Replication : 1. 
auto_create_topic => true", "02":"Produce and consume messages to a single topic - three partition.", "03":"This test sends messages to 3 replicas", "04":"To trigger leader election: find the leader and terminate by controlled failure (kill -15)", "05":"Restart the terminated broker", "06":"Lookup brokers' log4j messages and verify that leader is re-elected successfully", "07":"At the end it verifies the log size and contents", "08":"Use a consumer to verify no message loss.", "09":"Producer dimensions : mode:sync, acks:-1, comp:0", "10":"Log segment size : 102400" }, "testcase_args": { "broker_type": "controller", "bounce_broker": "true", "signal_type": "SIGTERM", "replica_factor": "3", "num_partition": "3", "num_iteration": "3", "auto_create_topic": "true", "sleep_seconds_between_producer_calls": "1", "message_producing_free_time_sec": "15", "num_messages_to_produce_per_producer_call": "50" }, "entities": [ { "entity_id": "0", "clientPort": "2188", "dataDir": "/tmp/zookeeper_0", "log_filename": "zookeeper_2188.log", "config_filename": "zookeeper_2188.properties" }, { "entity_id": "1", "port": "9091", "broker.id": "1", "num.partitions": "3", "default.replication.factor": "3", "log.segment.bytes": "102400", "log.dir": "/tmp/kafka_server_1_logs", "log_filename": "kafka_server_9091.log", "config_filename": "kafka_server_9091.properties" }, { "entity_id": "2", "port": "9092", "broker.id": "2", "num.partitions": "3", "default.replication.factor": "3", "log.segment.bytes": "102400", ======================================================================= ==./system_test/replication_testsuite/testcase_0251/testcase_0251_properties.json ======================================================================= { "description": {"01":"Multi Follower Failures (SIGTERM) in Replication : Base Test", "02":"Produce and consume messages to a single topic - three partition.", "03":"This test sends messages to 3 replicas", "04":"To trigger leader election: find the leader and 
terminate by controlled failure (kill -15)", "05":"Restart the terminated broker", "06":"Lookup brokers' log4j messages and verify that leader is re-elected successfully", "07":"At the end it verifies the log size and contents", "08":"Use a consumer to verify no message loss.", "09":"Producer dimensions : mode:sync, acks:-1, comp:0", "10":"Log segment size : 102400" }, "testcase_args": { "broker_type": "follower", "bounce_broker": "true", "signal_type": "SIGTERM", "replica_factor": "3", "num_partition": "3", "num_iteration": "3", "sleep_seconds_between_producer_calls": "1", "message_producing_free_time_sec": "15", "num_messages_to_produce_per_producer_call": "50" }, "entities": [ { "entity_id": "0", "clientPort": "2188", "dataDir": "/tmp/zookeeper_0", "log_filename": "zookeeper_2188.log", "config_filename": "zookeeper_2188.properties" }, { "entity_id": "1", "port": "9091", "broker.id": "1", "log.segment.bytes": "102400", "log.dir": "/tmp/kafka_server_1_logs", "default.replication.factor": "3", "num.partitions": "3", "log_filename": "kafka_server_9091.log", "config_filename": "kafka_server_9091.properties" }, { "entity_id": "2", "port": "9092", "broker.id": "2", "log.segment.bytes": "102400", "log.dir": "/tmp/kafka_server_2_logs", "default.replication.factor": "3", "num.partitions": "3", ======================================================================= ==./system_test/replication_testsuite/testcase_0252/testcase_0252_properties.json ======================================================================= { "description": {"01":"Multi Follower Failures (SIGTERM) in Replication : 1. 
mode => async", "02":"Produce and consume messages to a single topic - three partition.", "03":"This test sends messages to 3 replicas", "04":"To trigger leader election: find the leader and terminate by controlled failure (kill -15)", "05":"Restart the terminated broker", "06":"Lookup brokers' log4j messages and verify that leader is re-elected successfully", "07":"At the end it verifies the log size and contents", "08":"Use a consumer to verify no message loss.", "09":"Producer dimensions : mode:async, acks:-1, comp:0", "10":"Log segment size : 102400" }, "testcase_args": { "broker_type": "follower", "bounce_broker": "true", "signal_type": "SIGTERM", "replica_factor": "3", "num_partition": "3", "num_iteration": "3", "sleep_seconds_between_producer_calls": "1", "message_producing_free_time_sec": "15", "num_messages_to_produce_per_producer_call": "50" }, "entities": [ { "entity_id": "0", "clientPort": "2188", "dataDir": "/tmp/zookeeper_0", "log_filename": "zookeeper_2188.log", "config_filename": "zookeeper_2188.properties" }, { "entity_id": "1", "port": "9091", "broker.id": "1", "log.segment.bytes": "102400", "log.dir": "/tmp/kafka_server_1_logs", "default.replication.factor": "3", "num.partitions": "3", "log_filename": "kafka_server_9091.log", "config_filename": "kafka_server_9091.properties" }, { "entity_id": "2", "port": "9092", "broker.id": "2", "log.segment.bytes": "102400", "log.dir": "/tmp/kafka_server_2_logs", "default.replication.factor": "3", "num.partitions": "3", ======================================================================= ==./system_test/replication_testsuite/testcase_0253/testcase_0253_properties.json ======================================================================= { "description": {"01":"Multi Follower Failures (SIGTERM) in Replication : 1. 
acks => 1", "02":"Produce and consume messages to a single topic - three partition.", "03":"This test sends messages to 3 replicas", "04":"To trigger leader election: find the leader and terminate by controlled failure (kill -15)", "05":"Restart the terminated broker", "06":"Lookup brokers' log4j messages and verify that leader is re-elected successfully", "07":"At the end it verifies the log size and contents", "08":"Use a consumer to verify no message loss.", "09":"Producer dimensions : mode:sync, acks:1, comp:0", "10":"Log segment size : 102400" }, "testcase_args": { "broker_type": "follower", "bounce_broker": "true", "signal_type": "SIGTERM", "replica_factor": "3", "num_partition": "3", "num_iteration": "3", "sleep_seconds_between_producer_calls": "1", "message_producing_free_time_sec": "15", "num_messages_to_produce_per_producer_call": "50" }, "entities": [ { "entity_id": "0", "clientPort": "2188", "dataDir": "/tmp/zookeeper_0", "log_filename": "zookeeper_2188.log", "config_filename": "zookeeper_2188.properties" }, { "entity_id": "1", "port": "9091", "broker.id": "1", "log.segment.bytes": "102400", "log.dir": "/tmp/kafka_server_1_logs", "default.replication.factor": "3", "num.partitions": "3", "log_filename": "kafka_server_9091.log", "config_filename": "kafka_server_9091.properties" }, { "entity_id": "2", "port": "9092", "broker.id": "2", "log.segment.bytes": "102400", "log.dir": "/tmp/kafka_server_2_logs", "default.replication.factor": "3", "num.partitions": "3", ======================================================================= ==./system_test/replication_testsuite/testcase_0254/testcase_0254_properties.json ======================================================================= { "description": {"01":"Multi Follower Failures (SIGTERM) in Replication : 1. mode => async; 2. 
acks => 1", "02":"Produce and consume messages to a single topic - three partition.", "03":"This test sends messages to 3 replicas", "04":"To trigger leader election: find the leader and terminate by controlled failure (kill -15)", "05":"Restart the terminated broker", "06":"Lookup brokers' log4j messages and verify that leader is re-elected successfully", "07":"At the end it verifies the log size and contents", "08":"Use a consumer to verify no message loss.", "09":"Producer dimensions : mode:async, acks:1, comp:0", "10":"Log segment size : 102400" }, "testcase_args": { "broker_type": "follower", "bounce_broker": "true", "signal_type": "SIGTERM", "replica_factor": "3", "num_partition": "3", "num_iteration": "3", "sleep_seconds_between_producer_calls": "1", "message_producing_free_time_sec": "15", "num_messages_to_produce_per_producer_call": "50" }, "entities": [ { "entity_id": "0", "clientPort": "2188", "dataDir": "/tmp/zookeeper_0", "log_filename": "zookeeper_2188.log", "config_filename": "zookeeper_2188.properties" }, { "entity_id": "1", "port": "9091", "broker.id": "1", "log.segment.bytes": "102400", "log.dir": "/tmp/kafka_server_1_logs", "default.replication.factor": "3", "num.partitions": "3", "log_filename": "kafka_server_9091.log", "config_filename": "kafka_server_9091.properties" }, { "entity_id": "2", "port": "9092", "broker.id": "2", "log.segment.bytes": "102400", "log.dir": "/tmp/kafka_server_2_logs", "default.replication.factor": "3", "num.partitions": "3", ======================================================================= ==./system_test/replication_testsuite/testcase_0255/testcase_0255_properties.json ======================================================================= { "description": {"01":"Multi Follower Failures (SIGTERM) in Replication : 1. 
comp => 1", "02":"Produce and consume messages to a single topic - three partition.", "03":"This test sends messages to 3 replicas", "04":"To trigger leader election: find the leader and terminate by controlled failure (kill -15)", "05":"Restart the terminated broker", "06":"Lookup brokers' log4j messages and verify that leader is re-elected successfully", "07":"At the end it verifies the log size and contents", "08":"Use a consumer to verify no message loss.", "09":"Producer dimensions : mode:sync, acks:-1, comp:1", "10":"Log segment size : 102400" }, "testcase_args": { "broker_type": "follower", "bounce_broker": "true", "signal_type": "SIGTERM", "replica_factor": "3", "num_partition": "3", "num_iteration": "3", "sleep_seconds_between_producer_calls": "1", "message_producing_free_time_sec": "15", "num_messages_to_produce_per_producer_call": "50" }, "entities": [ { "entity_id": "0", "clientPort": "2188", "dataDir": "/tmp/zookeeper_0", "log_filename": "zookeeper_2188.log", "config_filename": "zookeeper_2188.properties" }, { "entity_id": "1", "port": "9091", "broker.id": "1", "log.segment.bytes": "102400", "log.dir": "/tmp/kafka_server_1_logs", "default.replication.factor": "3", "num.partitions": "3", "log_filename": "kafka_server_9091.log", "config_filename": "kafka_server_9091.properties" }, { "entity_id": "2", "port": "9092", "broker.id": "2", "log.segment.bytes": "102400", "log.dir": "/tmp/kafka_server_2_logs", "default.replication.factor": "3", "num.partitions": "3", ======================================================================= ==./system_test/replication_testsuite/testcase_0256/testcase_0256_properties.json ======================================================================= { "description": {"01":"Multi Follower Failures (SIGTERM) in Replication : 1. mode => async; 2. 
comp => 1", "02":"Produce and consume messages to a single topic - three partition.", "03":"This test sends messages to 3 replicas", "04":"To trigger leader election: find the leader and terminate by controlled failure (kill -15)", "05":"Restart the terminated broker", "06":"Lookup brokers' log4j messages and verify that leader is re-elected successfully", "07":"At the end it verifies the log size and contents", "08":"Use a consumer to verify no message loss.", "09":"Producer dimensions : mode:async, acks:-1, comp:1", "10":"Log segment size : 102400" }, "testcase_args": { "broker_type": "follower", "bounce_broker": "true", "signal_type": "SIGTERM", "replica_factor": "3", "num_partition": "3", "num_iteration": "3", "sleep_seconds_between_producer_calls": "1", "message_producing_free_time_sec": "15", "num_messages_to_produce_per_producer_call": "50" }, "entities": [ { "entity_id": "0", "clientPort": "2188", "dataDir": "/tmp/zookeeper_0", "log_filename": "zookeeper_2188.log", "config_filename": "zookeeper_2188.properties" }, { "entity_id": "1", "port": "9091", "broker.id": "1", "log.segment.bytes": "102400", "log.dir": "/tmp/kafka_server_1_logs", "default.replication.factor": "3", "num.partitions": "3", "log_filename": "kafka_server_9091.log", "config_filename": "kafka_server_9091.properties" }, { "entity_id": "2", "port": "9092", "broker.id": "2", "log.segment.bytes": "102400", "log.dir": "/tmp/kafka_server_2_logs", "default.replication.factor": "3", "num.partitions": "3", ======================================================================= ==./system_test/replication_testsuite/testcase_0257/testcase_0257_properties.json ======================================================================= { "description": {"01":"Multi Follower Failures (SIGTERM) in Replication : 1. acks => 1; 2. 
comp => 1", "02":"Produce and consume messages to a single topic - three partition.", "03":"This test sends messages to 3 replicas", "04":"To trigger leader election: find the leader and terminate by controlled failure (kill -15)", "05":"Restart the terminated broker", "06":"Lookup brokers' log4j messages and verify that leader is re-elected successfully", "07":"At the end it verifies the log size and contents", "08":"Use a consumer to verify no message loss.", "09":"Producer dimensions : mode:sync, acks:1, comp:1", "10":"Log segment size : 102400" }, "testcase_args": { "broker_type": "follower", "bounce_broker": "true", "signal_type": "SIGTERM", "replica_factor": "3", "num_partition": "3", "num_iteration": "3", "sleep_seconds_between_producer_calls": "1", "message_producing_free_time_sec": "15", "num_messages_to_produce_per_producer_call": "50" }, "entities": [ { "entity_id": "0", "clientPort": "2188", "dataDir": "/tmp/zookeeper_0", "log_filename": "zookeeper_2188.log", "config_filename": "zookeeper_2188.properties" }, { "entity_id": "1", "port": "9091", "broker.id": "1", "log.segment.bytes": "102400", "log.dir": "/tmp/kafka_server_1_logs", "default.replication.factor": "3", "num.partitions": "3", "log_filename": "kafka_server_9091.log", "config_filename": "kafka_server_9091.properties" }, { "entity_id": "2", "port": "9092", "broker.id": "2", "log.segment.bytes": "102400", "log.dir": "/tmp/kafka_server_2_logs", "default.replication.factor": "3", "num.partitions": "3", ======================================================================= ==./system_test/replication_testsuite/testcase_0258/testcase_0258_properties.json ======================================================================= { "description": {"01":"Multi Follower Failures (SIGTERM) in Replication : 1. mode => async; 2. acks => 1; 3. 
comp => 1", "02":"Produce and consume messages to a single topic - three partition.", "03":"This test sends messages to 3 replicas", "04":"To trigger leader election: find the leader and terminate by controlled failure (kill -15)", "05":"Restart the terminated broker", "06":"Lookup brokers' log4j messages and verify that leader is re-elected successfully", "07":"At the end it verifies the log size and contents", "08":"Use a consumer to verify no message loss.", "09":"Producer dimensions : mode:async, acks:1, comp:1", "10":"Log segment size : 102400" }, "testcase_args": { "broker_type": "follower", "bounce_broker": "true", "signal_type": "SIGTERM", "replica_factor": "3", "num_partition": "3", "num_iteration": "3", "sleep_seconds_between_producer_calls": "1", "message_producing_free_time_sec": "15", "num_messages_to_produce_per_producer_call": "50" }, "entities": [ { "entity_id": "0", "clientPort": "2188", "dataDir": "/tmp/zookeeper_0", "log_filename": "zookeeper_2188.log", "config_filename": "zookeeper_2188.properties" }, { "entity_id": "1", "port": "9091", "broker.id": "1", "log.segment.bytes": "102400", "log.dir": "/tmp/kafka_server_1_logs", "default.replication.factor": "3", "num.partitions": "3", "log_filename": "kafka_server_9091.log", "config_filename": "kafka_server_9091.properties" }, { "entity_id": "2", "port": "9092", "broker.id": "2", "log.segment.bytes": "102400", "log.dir": "/tmp/kafka_server_2_logs", "default.replication.factor": "3", "num.partitions": "3", ======================================================================= ==./system_test/replication_testsuite/testcase_0259/testcase_0259_properties.json ======================================================================= { "description": {"01":"Multi Follower Failures (SIGTERM) in Replication : 1. 
auto_create_topic => true", "02":"Produce and consume messages to a single topic - three partition.", "03":"This test sends messages to 3 replicas", "04":"To trigger leader election: find the leader and terminate by controlled failure (kill -15)", "05":"Restart the terminated broker", "06":"Lookup brokers' log4j messages and verify that leader is re-elected successfully", "07":"At the end it verifies the log size and contents", "08":"Use a consumer to verify no message loss.", "09":"Producer dimensions : mode:sync, acks:-1, comp:0", "10":"Log segment size : 102400" }, "testcase_args": { "broker_type": "follower", "bounce_broker": "true", "signal_type": "SIGTERM", "replica_factor": "3", "num_partition": "3", "num_iteration": "3", "auto_create_topic": "true", "sleep_seconds_between_producer_calls": "1", "message_producing_free_time_sec": "15", "num_messages_to_produce_per_producer_call": "50" }, "entities": [ { "entity_id": "0", "clientPort": "2188", "dataDir": "/tmp/zookeeper_0", "log_filename": "zookeeper_2188.log", "config_filename": "zookeeper_2188.properties" }, { "entity_id": "1", "port": "9091", "broker.id": "1", "num.partitions": "3", "default.replication.factor": "3", "log.segment.bytes": "102400", "log.dir": "/tmp/kafka_server_1_logs", "log_filename": "kafka_server_9091.log", "config_filename": "kafka_server_9091.properties" }, { "entity_id": "2", "port": "9092", "broker.id": "2", "num.partitions": "3", "default.replication.factor": "3", "log.segment.bytes": "102400", ======================================================================= ==./system_test/replication_testsuite/testcase_0301/testcase_0301_properties.json ======================================================================= { "description": {"01":"Leader Garbage Collection Pauses Simulation in Replication : Base Test", "02":"Produce and consume messages to a single topic - three partition.", "03":"This test sends messages to 3 replicas", "04":"To simulate GC Pauses : kill -SIGSTOP => wait N 
seconds => kill -SIGCONT", "05":"At the end it verifies the log size and contents", "06":"Use a consumer to verify no message loss.", "07":"Producer dimensions : mode:sync, acks:-1, comp:0", "08":"Log segment size : 102400" }, "testcase_args": { "broker_type": "leader", "bounce_broker": "false", "pause_time_in_seconds": "5", "replica_factor": "3", "num_partition": "3", "num_iteration": "3", "sleep_seconds_between_producer_calls": "1", "message_producing_free_time_sec": "15", "num_messages_to_produce_per_producer_call": "50" }, "entities": [ { "entity_id": "0", "clientPort": "2188", "dataDir": "/tmp/zookeeper_0", "log_filename": "zookeeper_2188.log", "config_filename": "zookeeper_2188.properties" }, { "entity_id": "1", "port": "9091", "broker.id": "1", "log.segment.bytes": "102400", "log.dir": "/tmp/kafka_server_1_logs", "default.replication.factor": "3", "num.partitions": "3", "log_filename": "kafka_server_9091.log", "config_filename": "kafka_server_9091.properties" }, { "entity_id": "2", "port": "9092", "broker.id": "2", "log.segment.bytes": "102400", "log.dir": "/tmp/kafka_server_2_logs", "default.replication.factor": "3", "num.partitions": "3", "log_filename": "kafka_server_9092.log", "config_filename": "kafka_server_9092.properties" ======================================================================= ==./system_test/replication_testsuite/testcase_0302/testcase_0302_properties.json ======================================================================= { "description": {"01":"Leader Garbage Collection Pauses Simulation in Replication : 1. 
mode => async", "02":"Produce and consume messages to a single topic - three partition.", "03":"This test sends messages to 3 replicas", "04":"To simulate GC Pauses : kill -SIGSTOP => wait N seconds => kill -SIGCONT", "05":"At the end it verifies the log size and contents", "06":"Use a consumer to verify no message loss.", "07":"Producer dimensions : mode:sync, acks:-1, comp:0", "08":"Log segment size : 102400" }, "testcase_args": { "broker_type": "leader", "bounce_broker": "false", "pause_time_in_seconds": "5", "replica_factor": "3", "num_partition": "3", "num_iteration": "3", "sleep_seconds_between_producer_calls": "1", "message_producing_free_time_sec": "15", "num_messages_to_produce_per_producer_call": "50" }, "entities": [ { "entity_id": "0", "clientPort": "2188", "dataDir": "/tmp/zookeeper_0", "log_filename": "zookeeper_2188.log", "config_filename": "zookeeper_2188.properties" }, { "entity_id": "1", "port": "9091", "broker.id": "1", "log.segment.bytes": "102400", "log.dir": "/tmp/kafka_server_1_logs", "default.replication.factor": "3", "num.partitions": "3", "log_filename": "kafka_server_9091.log", "config_filename": "kafka_server_9091.properties" }, { "entity_id": "2", "port": "9092", "broker.id": "2", "log.segment.bytes": "102400", "log.dir": "/tmp/kafka_server_2_logs", "default.replication.factor": "3", "num.partitions": "3", "log_filename": "kafka_server_9092.log", "config_filename": "kafka_server_9092.properties" ======================================================================= ==./system_test/replication_testsuite/testcase_0303/testcase_0303_properties.json ======================================================================= { "description": {"01":"Leader Garbage Collection Pauses Simulation in Replication : 1. 
acks => 1", "02":"Produce and consume messages to a single topic - three partition.", "03":"This test sends messages to 3 replicas", "04":"To simulate GC Pauses : kill -SIGSTOP => wait N seconds => kill -SIGCONT", "05":"At the end it verifies the log size and contents", "06":"Use a consumer to verify no message loss.", "07":"Producer dimensions : mode:sync, acks:-1, comp:0", "08":"Log segment size : 102400" }, "testcase_args": { "broker_type": "leader", "bounce_broker": "false", "pause_time_in_seconds": "5", "replica_factor": "3", "num_partition": "3", "num_iteration": "3", "sleep_seconds_between_producer_calls": "1", "message_producing_free_time_sec": "15", "num_messages_to_produce_per_producer_call": "50" }, "entities": [ { "entity_id": "0", "clientPort": "2188", "dataDir": "/tmp/zookeeper_0", "log_filename": "zookeeper_2188.log", "config_filename": "zookeeper_2188.properties" }, { "entity_id": "1", "port": "9091", "broker.id": "1", "log.segment.bytes": "102400", "log.dir": "/tmp/kafka_server_1_logs", "default.replication.factor": "3", "num.partitions": "3", "log_filename": "kafka_server_9091.log", "config_filename": "kafka_server_9091.properties" }, { "entity_id": "2", "port": "9092", "broker.id": "2", "log.segment.bytes": "102400", "log.dir": "/tmp/kafka_server_2_logs", "default.replication.factor": "3", "num.partitions": "3", "log_filename": "kafka_server_9092.log", "config_filename": "kafka_server_9092.properties" ======================================================================= ==./system_test/replication_testsuite/testcase_0304/testcase_0304_properties.json ======================================================================= { "description": {"01":"Leader Garbage Collection Pauses Simulation in Replication : 1. mode => async; 2. 
acks => 1", "02":"Produce and consume messages to a single topic - three partition.", "03":"This test sends messages to 3 replicas", "04":"To simulate GC Pauses : kill -SIGSTOP => wait N seconds => kill -SIGCONT", "05":"At the end it verifies the log size and contents", "06":"Use a consumer to verify no message loss.", "07":"Producer dimensions : mode:sync, acks:-1, comp:0", "08":"Log segment size : 102400" }, "testcase_args": { "broker_type": "leader", "bounce_broker": "false", "pause_time_in_seconds": "5", "replica_factor": "3", "num_partition": "3", "num_iteration": "3", "sleep_seconds_between_producer_calls": "1", "message_producing_free_time_sec": "15", "num_messages_to_produce_per_producer_call": "50" }, "entities": [ { "entity_id": "0", "clientPort": "2188", "dataDir": "/tmp/zookeeper_0", "log_filename": "zookeeper_2188.log", "config_filename": "zookeeper_2188.properties" }, { "entity_id": "1", "port": "9091", "broker.id": "1", "log.segment.bytes": "102400", "log.dir": "/tmp/kafka_server_1_logs", "default.replication.factor": "3", "num.partitions": "3", "log_filename": "kafka_server_9091.log", "config_filename": "kafka_server_9091.properties" }, { "entity_id": "2", "port": "9092", "broker.id": "2", "log.segment.bytes": "102400", "log.dir": "/tmp/kafka_server_2_logs", "default.replication.factor": "3", "num.partitions": "3", "log_filename": "kafka_server_9092.log", "config_filename": "kafka_server_9092.properties" ======================================================================= ==./system_test/replication_testsuite/testcase_0305/testcase_0305_properties.json ======================================================================= { "description": {"01":"Leader Garbage Collection Pauses Simulation in Replication : 1. 
comp => 1", "02":"Produce and consume messages to a single topic - three partition.", "03":"This test sends messages to 3 replicas", "04":"To simulate GC Pauses : kill -SIGSTOP => wait N seconds => kill -SIGCONT", "05":"At the end it verifies the log size and contents", "06":"Use a consumer to verify no message loss.", "07":"Producer dimensions : mode:sync, acks:-1, comp:0", "08":"Log segment size : 102400" }, "testcase_args": { "broker_type": "leader", "bounce_broker": "false", "pause_time_in_seconds": "5", "replica_factor": "3", "num_partition": "3", "num_iteration": "3", "sleep_seconds_between_producer_calls": "1", "message_producing_free_time_sec": "15", "num_messages_to_produce_per_producer_call": "50" }, "entities": [ { "entity_id": "0", "clientPort": "2188", "dataDir": "/tmp/zookeeper_0", "log_filename": "zookeeper_2188.log", "config_filename": "zookeeper_2188.properties" }, { "entity_id": "1", "port": "9091", "broker.id": "1", "log.segment.bytes": "102400", "log.dir": "/tmp/kafka_server_1_logs", "default.replication.factor": "3", "num.partitions": "3", "log_filename": "kafka_server_9091.log", "config_filename": "kafka_server_9091.properties" }, { "entity_id": "2", "port": "9092", "broker.id": "2", "log.segment.bytes": "102400", "log.dir": "/tmp/kafka_server_2_logs", "default.replication.factor": "3", "num.partitions": "3", "log_filename": "kafka_server_9092.log", "config_filename": "kafka_server_9092.properties" ======================================================================= ==./system_test/replication_testsuite/testcase_0306/testcase_0306_properties.json ======================================================================= { "description": {"01":"Leader Garbage Collection Pauses Simulation in Replication : 1. mode => async; 2. 
comp => 1", "02":"Produce and consume messages to a single topic - three partition.", "03":"This test sends messages to 3 replicas", "04":"To simulate GC Pauses : kill -SIGSTOP => wait N seconds => kill -SIGCONT", "05":"At the end it verifies the log size and contents", "06":"Use a consumer to verify no message loss.", "07":"Producer dimensions : mode:sync, acks:-1, comp:0", "08":"Log segment size : 102400" }, "testcase_args": { "broker_type": "leader", "bounce_broker": "false", "pause_time_in_seconds": "5", "replica_factor": "3", "num_partition": "3", "num_iteration": "3", "sleep_seconds_between_producer_calls": "1", "message_producing_free_time_sec": "15", "num_messages_to_produce_per_producer_call": "50" }, "entities": [ { "entity_id": "0", "clientPort": "2188", "dataDir": "/tmp/zookeeper_0", "log_filename": "zookeeper_2188.log", "config_filename": "zookeeper_2188.properties" }, { "entity_id": "1", "port": "9091", "broker.id": "1", "log.segment.bytes": "102400", "log.dir": "/tmp/kafka_server_1_logs", "default.replication.factor": "3", "num.partitions": "3", "log_filename": "kafka_server_9091.log", "config_filename": "kafka_server_9091.properties" }, { "entity_id": "2", "port": "9092", "broker.id": "2", "log.segment.bytes": "102400", "log.dir": "/tmp/kafka_server_2_logs", "default.replication.factor": "3", "num.partitions": "3", "log_filename": "kafka_server_9092.log", "config_filename": "kafka_server_9092.properties" ======================================================================= ==./system_test/replication_testsuite/testcase_0307/testcase_0307_properties.json ======================================================================= { "description": {"01":"Leader Garbage Collection Pauses Simulation in Replication : 1. acks => 1; 2. 
comp => 1", "02":"Produce and consume messages to a single topic - three partition.", "03":"This test sends messages to 3 replicas", "04":"To simulate GC Pauses : kill -SIGSTOP => wait N seconds => kill -SIGCONT", "05":"At the end it verifies the log size and contents", "06":"Use a consumer to verify no message loss.", "07":"Producer dimensions : mode:sync, acks:-1, comp:0", "08":"Log segment size : 102400" }, "testcase_args": { "broker_type": "leader", "bounce_broker": "false", "pause_time_in_seconds": "5", "replica_factor": "3", "num_partition": "3", "num_iteration": "3", "sleep_seconds_between_producer_calls": "1", "message_producing_free_time_sec": "15", "num_messages_to_produce_per_producer_call": "50" }, "entities": [ { "entity_id": "0", "clientPort": "2188", "dataDir": "/tmp/zookeeper_0", "log_filename": "zookeeper_2188.log", "config_filename": "zookeeper_2188.properties" }, { "entity_id": "1", "port": "9091", "broker.id": "1", "log.segment.bytes": "102400", "log.dir": "/tmp/kafka_server_1_logs", "default.replication.factor": "3", "num.partitions": "3", "log_filename": "kafka_server_9091.log", "config_filename": "kafka_server_9091.properties" }, { "entity_id": "2", "port": "9092", "broker.id": "2", "log.segment.bytes": "102400", "log.dir": "/tmp/kafka_server_2_logs", "default.replication.factor": "3", "num.partitions": "3", "log_filename": "kafka_server_9092.log", "config_filename": "kafka_server_9092.properties" ======================================================================= ==./system_test/replication_testsuite/testcase_0308/testcase_0308_properties.json ======================================================================= { "description": {"01":"Leader Garbage Collection Pauses Simulation in Replication : 1. mode => async; 2. acks => 1; 3. 
comp => 1", "02":"Produce and consume messages to a single topic - three partition.", "03":"This test sends messages to 3 replicas", "04":"To simulate GC Pauses : kill -SIGSTOP => wait N seconds => kill -SIGCONT", "05":"At the end it verifies the log size and contents", "06":"Use a consumer to verify no message loss.", "07":"Producer dimensions : mode:sync, acks:-1, comp:0", "08":"Log segment size : 102400" }, "testcase_args": { "broker_type": "leader", "bounce_broker": "false", "pause_time_in_seconds": "5", "replica_factor": "3", "num_partition": "3", "num_iteration": "3", "sleep_seconds_between_producer_calls": "1", "message_producing_free_time_sec": "15", "num_messages_to_produce_per_producer_call": "50" }, "entities": [ { "entity_id": "0", "clientPort": "2188", "dataDir": "/tmp/zookeeper_0", "log_filename": "zookeeper_2188.log", "config_filename": "zookeeper_2188.properties" }, { "entity_id": "1", "port": "9091", "broker.id": "1", "log.segment.bytes": "102400", "log.dir": "/tmp/kafka_server_1_logs", "default.replication.factor": "3", "num.partitions": "3", "log_filename": "kafka_server_9091.log", "config_filename": "kafka_server_9091.properties" }, { "entity_id": "2", "port": "9092", "broker.id": "2", "log.segment.bytes": "102400", "log.dir": "/tmp/kafka_server_2_logs", "default.replication.factor": "3", "num.partitions": "3", "log_filename": "kafka_server_9092.log", "config_filename": "kafka_server_9092.properties" ======================================================================= ==./system_test/replication_testsuite/testcase_0309/testcase_0309_properties.json ======================================================================= { "description": {"01":"Leader Garbage Collection Pauses Simulation in Replication : 1. 
auto_create_topic => true", "02":"Produce and consume messages to a single topic - three partition.", "03":"This test sends messages to 3 replicas", "04":"To simulate GC Pauses : kill -SIGSTOP => wait N seconds => kill -SIGCONT", "05":"At the end it verifies the log size and contents", "06":"Use a consumer to verify no message loss.", "07":"Producer dimensions : mode:sync, acks:-1, comp:0", "08":"Log segment size : 102400" }, "testcase_args": { "broker_type": "leader", "bounce_broker": "false", "pause_time_in_seconds": "5", "replica_factor": "3", "num_partition": "3", "num_iteration": "3", "auto_create_topic": "true", "sleep_seconds_between_producer_calls": "1", "message_producing_free_time_sec": "15", "num_messages_to_produce_per_producer_call": "50" }, "entities": [ { "entity_id": "0", "clientPort": "2188", "dataDir": "/tmp/zookeeper_0", "log_filename": "zookeeper_2188.log", "config_filename": "zookeeper_2188.properties" }, { "entity_id": "1", "port": "9091", "broker.id": "1", "num.partitions": "3", "default.replication.factor": "3", "log.segment.bytes": "102400", "log.dir": "/tmp/kafka_server_1_logs", "log_filename": "kafka_server_9091.log", "config_filename": "kafka_server_9091.properties" }, { "entity_id": "2", "port": "9092", "broker.id": "2", "num.partitions": "3", "default.replication.factor": "3", "log.segment.bytes": "102400", "log.dir": "/tmp/kafka_server_2_logs", "log_filename": "kafka_server_9092.log", ======================================================================= ==./system_test/replication_testsuite/testcase_1/cluster_config.json ======================================================================= { "cluster_config": [ { "entity_id": "0", "hostname": "localhost", "role": "zookeeper", "cluster_name": "source", "kafka_home": "default", "java_home": "default", "jmx_port": "9990" }, { "entity_id": "1", "hostname": "localhost", "role": "broker", "cluster_name": "source", "kafka_home": "default", "java_home": "default", "jmx_port": "9991" }, { 
"entity_id": "2", "hostname": "localhost", "role": "broker", "cluster_name": "source", "kafka_home": "default", "java_home": "default", "jmx_port": "9992" }, { "entity_id": "3", "hostname": "localhost", "role": "broker", "cluster_name": "source", "kafka_home": "default", "java_home": "default", "jmx_port": "9993" }, { "entity_id": "4", "hostname": "localhost", "role": "producer_performance", "cluster_name": "source", "kafka_home": "default", "java_home": "default", "jmx_port": "9994" }, { "entity_id": "5", "hostname": "localhost", ======================================================================= ==./system_test/replication_testsuite/testcase_1/testcase_1_properties.json ======================================================================= { "description": {"01":"To Test : 'Leader Failure in Replication'", "02":"Produce and consume messages to a single topic - single partition.", "03":"This test sends messages to 3 replicas", "04":"To trigger leader election: find the leader and terminate by controlled failure (kill -15)", "05":"Restart the terminated broker", "06":"Lookup brokers' log4j messages and verify that leader is re-elected successfully", "07":"At the end it verifies the log size and contents", "08":"Use a consumer to verify no message loss.", "09":"Producer dimensions : mode:sync, acks:-1, comp:0", "10":"Log segment size : 10240" }, "testcase_args": { "broker_type": "leader", "bounce_broker": "true", "replica_factor": "3", "num_partition": "2", "num_iteration": "2", "sleep_seconds_between_producer_calls": "1", "message_producing_free_time_sec": "15", "num_messages_to_produce_per_producer_call": "50" }, "entities": [ { "entity_id": "0", "clientPort": "2188", "dataDir": "/tmp/zookeeper_0", "log_filename": "zookeeper_2188.log", "config_filename": "zookeeper_2188.properties" }, { "entity_id": "1", "port": "9091", "broker.id": "1", "log.segment.bytes": "10240", "log.dir": "/tmp/kafka_server_1_logs", "log_filename": "kafka_server_9091.log", 
"config_filename": "kafka_server_9091.properties" }, { "entity_id": "2", "port": "9092", "broker.id": "2", "log.segment.bytes": "10240", "log.dir": "/tmp/kafka_server_2_logs", "log_filename": "kafka_server_9092.log", "config_filename": "kafka_server_9092.properties" }, { "entity_id": "3", ======================================================================= ==./system_test/replication_testsuite/testcase_4001/cluster_config.json ======================================================================= { "cluster_config": [ { "entity_id": "0", "hostname": "localhost", "role": "zookeeper", "cluster_name": "source", "kafka_home": "default", "java_home": "default", "jmx_port": "9990" }, { "entity_id": "1", "hostname": "localhost", "role": "broker", "cluster_name": "source", "kafka_home": "default", "java_home": "default", "jmx_port": "9991" }, { "entity_id": "2", "hostname": "localhost", "role": "broker", "cluster_name": "source", "kafka_home": "default", "java_home": "default", "jmx_port": "9992" }, { "entity_id": "3", "hostname": "localhost", "role": "broker", "cluster_name": "source", "kafka_home": "default", "java_home": "default", "jmx_port": "9993" }, { "entity_id": "4", "hostname": "localhost", "role": "producer_performance", "cluster_name": "source", "kafka_home": "default", "java_home": "default", "jmx_port": "9997" }, { "entity_id": "5", "hostname": "localhost", ======================================================================= ==./system_test/replication_testsuite/testcase_4001/testcase_4001_properties.json ======================================================================= { "description": {"01":"Broker Log Retention : Base Test", "02":"Produce and consume messages to 2 topics - 2 partitions", "03":"This test sends messages to 3 replicas", "04":"To trigger leader election: find the leader and terminate by controlled failure (kill -15)", "05":"Restart the terminated broker", "06":"Lookup brokers' log4j messages and verify that leader is re-elected 
successfully", "07":"At the end it verifies the log size and contents", "08":"Use a consumer to verify no message loss.", "09":"Producer dimensions : mode:sync, acks:-1, comp:0", "10":"Log segment size : 102400" }, "testcase_args": { "broker_type": "leader", "bounce_broker": "true", "replica_factor": "3", "num_partition": "2", "num_iteration": "1", "sleep_seconds_between_producer_calls": "1", "broker_down_time_in_sec": "5", "message_producing_free_time_sec": "15", "log_retention_test": "true" }, "entities": [ { "entity_id": "0", "clientPort": "2188", "dataDir": "/tmp/zookeeper_0", "log_filename": "zookeeper_2188.log", "config_filename": "zookeeper_2188.properties" }, { "entity_id": "1", "port": "9091", "broker.id": "1", "log.segment.bytes": "102400", "log.retention.size": "1048576", "log.dir": "/tmp/kafka_server_1_logs", "default.replication.factor": "3", "num.partitions": "2", "log_filename": "kafka_server_9091.log", "config_filename": "kafka_server_9091.properties" }, { "entity_id": "2", "port": "9092", "broker.id": "2", "log.segment.bytes": "102400", "log.retention.size": "1048576", "log.dir": "/tmp/kafka_server_2_logs", ======================================================================= ==./system_test/replication_testsuite/testcase_4002/cluster_config.json ======================================================================= { "cluster_config": [ { "entity_id": "0", "hostname": "localhost", "role": "zookeeper", "cluster_name": "source", "kafka_home": "default", "java_home": "default", "jmx_port": "9990" }, { "entity_id": "1", "hostname": "localhost", "role": "broker", "cluster_name": "source", "kafka_home": "default", "java_home": "default", "jmx_port": "9991" }, { "entity_id": "2", "hostname": "localhost", "role": "broker", "cluster_name": "source", "kafka_home": "default", "java_home": "default", "jmx_port": "9992" }, { "entity_id": "3", "hostname": "localhost", "role": "broker", "cluster_name": "source", "kafka_home": "default", "java_home": 
"default", "jmx_port": "9993" }, { "entity_id": "4", "hostname": "localhost", "role": "producer_performance", "cluster_name": "source", "kafka_home": "default", "java_home": "default", "jmx_port": "9997" }, { "entity_id": "5", "hostname": "localhost", ======================================================================= ==./system_test/replication_testsuite/testcase_4002/testcase_4002_properties.json ======================================================================= { "description": {"01":"Broker Log Retention : 1. acks => 1", "02":"Produce and consume messages to 2 topics - 2 partitions", "03":"This test sends messages to 3 replicas", "04":"To trigger leader election: find the leader and terminate by controlled failure (kill -15)", "05":"Restart the terminated broker", "06":"Lookup brokers' log4j messages and verify that leader is re-elected successfully", "07":"At the end it verifies the log size and contents", "08":"Use a consumer to verify no message loss.", "09":"Producer dimensions : mode:sync, acks:1, comp:0", "10":"Log segment size : 102400" }, "testcase_args": { "broker_type": "leader", "bounce_broker": "true", "replica_factor": "3", "num_partition": "2", "num_iteration": "1", "sleep_seconds_between_producer_calls": "1", "broker_down_time_in_sec": "5", "message_producing_free_time_sec": "15", "log_retention_test": "true" }, "entities": [ { "entity_id": "0", "clientPort": "2188", "dataDir": "/tmp/zookeeper_0", "log_filename": "zookeeper_2188.log", "config_filename": "zookeeper_2188.properties" }, { "entity_id": "1", "port": "9091", "broker.id": "1", "log.segment.bytes": "102400", "log.retention.size": "1048576", "log.dir": "/tmp/kafka_server_1_logs", "default.replication.factor": "3", "num.partitions": "2", "log_filename": "kafka_server_9091.log", "config_filename": "kafka_server_9091.properties" }, { "entity_id": "2", "port": "9092", "broker.id": "2", "log.segment.bytes": "102400", "log.retention.size": "1048576", "log.dir": 
"/tmp/kafka_server_2_logs", ======================================================================= ==./system_test/replication_testsuite/testcase_4003/cluster_config.json ======================================================================= { "cluster_config": [ { "entity_id": "0", "hostname": "localhost", "role": "zookeeper", "cluster_name": "source", "kafka_home": "default", "java_home": "default", "jmx_port": "9990" }, { "entity_id": "1", "hostname": "localhost", "role": "broker", "cluster_name": "source", "kafka_home": "default", "java_home": "default", "jmx_port": "9991" }, { "entity_id": "2", "hostname": "localhost", "role": "broker", "cluster_name": "source", "kafka_home": "default", "java_home": "default", "jmx_port": "9992" }, { "entity_id": "3", "hostname": "localhost", "role": "broker", "cluster_name": "source", "kafka_home": "default", "java_home": "default", "jmx_port": "9993" }, { "entity_id": "4", "hostname": "localhost", "role": "producer_performance", "cluster_name": "source", "kafka_home": "default", "java_home": "default", "jmx_port": "9997" }, { "entity_id": "5", "hostname": "localhost", ======================================================================= ==./system_test/replication_testsuite/testcase_4003/testcase_4003_properties.json ======================================================================= { "description": {"01":"Broker Log Retention : 1. 
acks => -1, comp => 1", "02":"Produce and consume messages to 2 topics - 2 partitions", "03":"This test sends messages to 3 replicas", "04":"To trigger leader election: find the leader and terminate by controlled failure (kill -15)", "05":"Restart the terminated broker", "06":"Lookup brokers' log4j messages and verify that leader is re-elected successfully", "07":"At the end it verifies the log size and contents", "08":"Use a consumer to verify no message loss.", "09":"Producer dimensions : mode:sync, acks:-1, comp:1", "10":"Log segment size : 102400" }, "testcase_args": { "broker_type": "leader", "bounce_broker": "true", "replica_factor": "3", "num_partition": "2", "num_iteration": "1", "sleep_seconds_between_producer_calls": "1", "broker_down_time_in_sec": "5", "message_producing_free_time_sec": "15", "log_retention_test": "true" }, "entities": [ { "entity_id": "0", "clientPort": "2188", "dataDir": "/tmp/zookeeper_0", "log_filename": "zookeeper_2188.log", "config_filename": "zookeeper_2188.properties" }, { "entity_id": "1", "port": "9091", "broker.id": "1", "log.segment.bytes": "102400", "log.retention.size": "1048576", "log.dir": "/tmp/kafka_server_1_logs", "default.replication.factor": "3", "num.partitions": "2", "log_filename": "kafka_server_9091.log", "config_filename": "kafka_server_9091.properties" }, { "entity_id": "2", "port": "9092", "broker.id": "2", "log.segment.bytes": "102400", "log.retention.size": "1048576", "log.dir": "/tmp/kafka_server_2_logs", ======================================================================= ==./system_test/replication_testsuite/testcase_4004/cluster_config.json ======================================================================= { "cluster_config": [ { "entity_id": "0", "hostname": "localhost", "role": "zookeeper", "cluster_name": "source", "kafka_home": "default", "java_home": "default", "jmx_port": "9990" }, { "entity_id": "1", "hostname": "localhost", "role": "broker", "cluster_name": "source", "kafka_home": 
"default", "java_home": "default", "jmx_port": "9991" }, { "entity_id": "2", "hostname": "localhost", "role": "broker", "cluster_name": "source", "kafka_home": "default", "java_home": "default", "jmx_port": "9992" }, { "entity_id": "3", "hostname": "localhost", "role": "broker", "cluster_name": "source", "kafka_home": "default", "java_home": "default", "jmx_port": "9993" }, { "entity_id": "4", "hostname": "localhost", "role": "producer_performance", "cluster_name": "source", "kafka_home": "default", "java_home": "default", "jmx_port": "9997" }, { "entity_id": "5", "hostname": "localhost", ======================================================================= ==./system_test/replication_testsuite/testcase_4004/testcase_4004_properties.json ======================================================================= { "description": {"01":"Broker Log Retention : 1. acks => 1", "02":"Produce and consume messages to 2 topics - 2 partitions", "03":"This test sends messages to 3 replicas", "04":"To trigger leader election: find the leader and terminate by controlled failure (kill -15)", "05":"Restart the terminated broker", "06":"Lookup brokers' log4j messages and verify that leader is re-elected successfully", "07":"At the end it verifies the log size and contents", "08":"Use a consumer to verify no message loss.", "09":"Producer dimensions : mode:sync, acks:1, comp:1", "10":"Log segment size : 102400" }, "testcase_args": { "broker_type": "leader", "bounce_broker": "true", "replica_factor": "3", "num_partition": "2", "num_iteration": "1", "sleep_seconds_between_producer_calls": "1", "broker_down_time_in_sec": "5", "message_producing_free_time_sec": "15", "log_retention_test": "true" }, "entities": [ { "entity_id": "0", "clientPort": "2188", "dataDir": "/tmp/zookeeper_0", "log_filename": "zookeeper_2188.log", "config_filename": "zookeeper_2188.properties" }, { "entity_id": "1", "port": "9091", "broker.id": "1", "log.segment.bytes": "102400", "log.retention.size": "1048576", 
"log.dir": "/tmp/kafka_server_1_logs", "default.replication.factor": "3", "num.partitions": "2", "log_filename": "kafka_server_9091.log", "config_filename": "kafka_server_9091.properties" }, { "entity_id": "2", "port": "9092", "broker.id": "2", "log.segment.bytes": "102400", "log.retention.size": "1048576", "log.dir": "/tmp/kafka_server_2_logs", ======================================================================= ==./system_test/replication_testsuite/testcase_4005/cluster_config.json ======================================================================= { "cluster_config": [ { "entity_id": "0", "hostname": "localhost", "role": "zookeeper", "cluster_name": "source", "kafka_home": "default", "java_home": "default", "jmx_port": "9990" }, { "entity_id": "1", "hostname": "localhost", "role": "broker", "cluster_name": "source", "kafka_home": "default", "java_home": "default", "jmx_port": "9991" }, { "entity_id": "2", "hostname": "localhost", "role": "broker", "cluster_name": "source", "kafka_home": "default", "java_home": "default", "jmx_port": "9992" }, { "entity_id": "3", "hostname": "localhost", "role": "broker", "cluster_name": "source", "kafka_home": "default", "java_home": "default", "jmx_port": "9993" }, { "entity_id": "4", "hostname": "localhost", "role": "producer_performance", "cluster_name": "source", "kafka_home": "default", "java_home": "default", "jmx_port": "9997" }, { "entity_id": "5", "hostname": "localhost", ======================================================================= ==./system_test/replication_testsuite/testcase_4005/testcase_4005_properties.json ======================================================================= { "description": {"01":"Broker Log Retention : 1. 
sync => false, acks => -1, comp => 0", "02":"Produce and consume messages to 2 topics - 2 partitions", "03":"This test sends messages to 3 replicas", "04":"To trigger leader election: find the leader and terminate by controlled failure (kill -15)", "05":"Restart the terminated broker", "06":"Lookup brokers' log4j messages and verify that leader is re-elected successfully", "07":"At the end it verifies the log size and contents", "08":"Use a consumer to verify no message loss.", "09":"Producer dimensions : mode:async, acks:-1, comp:0", "10":"Log segment size : 102400" }, "testcase_args": { "broker_type": "leader", "bounce_broker": "true", "replica_factor": "3", "num_partition": "2", "num_iteration": "1", "sleep_seconds_between_producer_calls": "1", "broker_down_time_in_sec": "5", "message_producing_free_time_sec": "15", "log_retention_test": "true" }, "entities": [ { "entity_id": "0", "clientPort": "2188", "dataDir": "/tmp/zookeeper_0", "log_filename": "zookeeper_2188.log", "config_filename": "zookeeper_2188.properties" }, { "entity_id": "1", "port": "9091", "broker.id": "1", "log.segment.bytes": "102400", "log.retention.size": "1048576", "log.dir": "/tmp/kafka_server_1_logs", "default.replication.factor": "3", "num.partitions": "2", "log_filename": "kafka_server_9091.log", "config_filename": "kafka_server_9091.properties" }, { "entity_id": "2", "port": "9092", "broker.id": "2", "log.segment.bytes": "102400", "log.retention.size": "1048576", "log.dir": "/tmp/kafka_server_2_logs", ======================================================================= ==./system_test/replication_testsuite/testcase_4006/cluster_config.json ======================================================================= { "cluster_config": [ { "entity_id": "0", "hostname": "localhost", "role": "zookeeper", "cluster_name": "source", "kafka_home": "default", "java_home": "default", "jmx_port": "9990" }, { "entity_id": "1", "hostname": "localhost", "role": "broker", "cluster_name": "source", 
"kafka_home": "default", "java_home": "default", "jmx_port": "9991" }, { "entity_id": "2", "hostname": "localhost", "role": "broker", "cluster_name": "source", "kafka_home": "default", "java_home": "default", "jmx_port": "9992" }, { "entity_id": "3", "hostname": "localhost", "role": "broker", "cluster_name": "source", "kafka_home": "default", "java_home": "default", "jmx_port": "9993" }, { "entity_id": "4", "hostname": "localhost", "role": "producer_performance", "cluster_name": "source", "kafka_home": "default", "java_home": "default", "jmx_port": "9997" }, { "entity_id": "5", "hostname": "localhost", ======================================================================= ==./system_test/replication_testsuite/testcase_4006/testcase_4006_properties.json ======================================================================= { "description": {"01":"Broker Log Retention : 1. acks => 1", "02":"Produce and consume messages to 2 topics - 2 partitions", "03":"This test sends messages to 3 replicas", "04":"To trigger leader election: find the leader and terminate by controlled failure (kill -15)", "05":"Restart the terminated broker", "06":"Lookup brokers' log4j messages and verify that leader is re-elected successfully", "07":"At the end it verifies the log size and contents", "08":"Use a consumer to verify no message loss.", "09":"Producer dimensions : mode:async, acks:1, comp:0", "10":"Log segment size : 102400" }, "testcase_args": { "broker_type": "leader", "bounce_broker": "true", "replica_factor": "3", "num_partition": "2", "num_iteration": "1", "sleep_seconds_between_producer_calls": "1", "broker_down_time_in_sec": "5", "message_producing_free_time_sec": "15", "log_retention_test": "true" }, "entities": [ { "entity_id": "0", "clientPort": "2188", "dataDir": "/tmp/zookeeper_0", "log_filename": "zookeeper_2188.log", "config_filename": "zookeeper_2188.properties" }, { "entity_id": "1", "port": "9091", "broker.id": "1", "log.segment.bytes": "102400", 
"log.retention.size": "1048576", "log.dir": "/tmp/kafka_server_1_logs", "default.replication.factor": "3", "num.partitions": "2", "log_filename": "kafka_server_9091.log", "config_filename": "kafka_server_9091.properties" }, { "entity_id": "2", "port": "9092", "broker.id": "2", "log.segment.bytes": "102400", "log.retention.size": "1048576", "log.dir": "/tmp/kafka_server_2_logs", ======================================================================= ==./system_test/replication_testsuite/testcase_4007/cluster_config.json ======================================================================= { "cluster_config": [ { "entity_id": "0", "hostname": "localhost", "role": "zookeeper", "cluster_name": "source", "kafka_home": "default", "java_home": "default", "jmx_port": "9990" }, { "entity_id": "1", "hostname": "localhost", "role": "broker", "cluster_name": "source", "kafka_home": "default", "java_home": "default", "jmx_port": "9991" }, { "entity_id": "2", "hostname": "localhost", "role": "broker", "cluster_name": "source", "kafka_home": "default", "java_home": "default", "jmx_port": "9992" }, { "entity_id": "3", "hostname": "localhost", "role": "broker", "cluster_name": "source", "kafka_home": "default", "java_home": "default", "jmx_port": "9993" }, { "entity_id": "4", "hostname": "localhost", "role": "producer_performance", "cluster_name": "source", "kafka_home": "default", "java_home": "default", "jmx_port": "9997" }, { "entity_id": "5", "hostname": "localhost", ======================================================================= ==./system_test/replication_testsuite/testcase_4007/testcase_4007_properties.json ======================================================================= { "description": {"01":"Broker Log Retention : 1. acks => -1, 2. 
comp => 1", "02":"Produce and consume messages to 2 topics - 2 partitions", "03":"This test sends messages to 3 replicas", "04":"To trigger leader election: find the leader and terminate by controlled failure (kill -15)", "05":"Restart the terminated broker", "06":"Lookup brokers' log4j messages and verify that leader is re-elected successfully", "07":"At the end it verifies the log size and contents", "08":"Use a consumer to verify no message loss.", "09":"Producer dimensions : mode:async, acks:-1, comp:1", "10":"Log segment size : 102400" }, "testcase_args": { "broker_type": "leader", "bounce_broker": "true", "replica_factor": "3", "num_partition": "2", "num_iteration": "1", "sleep_seconds_between_producer_calls": "1", "broker_down_time_in_sec": "5", "message_producing_free_time_sec": "15", "log_retention_test": "true" }, "entities": [ { "entity_id": "0", "clientPort": "2188", "dataDir": "/tmp/zookeeper_0", "log_filename": "zookeeper_2188.log", "config_filename": "zookeeper_2188.properties" }, { "entity_id": "1", "port": "9091", "broker.id": "1", "log.segment.bytes": "102400", "log.retention.size": "1048576", "log.dir": "/tmp/kafka_server_1_logs", "default.replication.factor": "3", "num.partitions": "2", "log_filename": "kafka_server_9091.log", "config_filename": "kafka_server_9091.properties" }, { "entity_id": "2", "port": "9092", "broker.id": "2", "log.segment.bytes": "102400", "log.retention.size": "1048576", "log.dir": "/tmp/kafka_server_2_logs", ======================================================================= ==./system_test/replication_testsuite/testcase_4008/cluster_config.json ======================================================================= { "cluster_config": [ { "entity_id": "0", "hostname": "localhost", "role": "zookeeper", "cluster_name": "source", "kafka_home": "default", "java_home": "default", "jmx_port": "9990" }, { "entity_id": "1", "hostname": "localhost", "role": "broker", "cluster_name": "source", "kafka_home": "default", 
"java_home": "default", "jmx_port": "9991" }, { "entity_id": "2", "hostname": "localhost", "role": "broker", "cluster_name": "source", "kafka_home": "default", "java_home": "default", "jmx_port": "9992" }, { "entity_id": "3", "hostname": "localhost", "role": "broker", "cluster_name": "source", "kafka_home": "default", "java_home": "default", "jmx_port": "9993" }, { "entity_id": "4", "hostname": "localhost", "role": "producer_performance", "cluster_name": "source", "kafka_home": "default", "java_home": "default", "jmx_port": "9997" }, { "entity_id": "5", "hostname": "localhost", ======================================================================= ==./system_test/replication_testsuite/testcase_4008/testcase_4008_properties.json ======================================================================= { "description": {"01":"Broker Log Retention : 1. acks => 1", "02":"Produce and consume messages to 2 topics - 2 partitions", "03":"This test sends messages to 3 replicas", "04":"To trigger leader election: find the leader and terminate by controlled failure (kill -15)", "05":"Restart the terminated broker", "06":"Lookup brokers' log4j messages and verify that leader is re-elected successfully", "07":"At the end it verifies the log size and contents", "08":"Use a consumer to verify no message loss.", "09":"Producer dimensions : mode:async, acks:1, comp:1", "10":"Log segment size : 102400" }, "testcase_args": { "broker_type": "leader", "bounce_broker": "true", "replica_factor": "3", "num_partition": "2", "num_iteration": "1", "sleep_seconds_between_producer_calls": "1", "broker_down_time_in_sec": "5", "message_producing_free_time_sec": "15", "log_retention_test": "true" }, "entities": [ { "entity_id": "0", "clientPort": "2188", "dataDir": "/tmp/zookeeper_0", "log_filename": "zookeeper_2188.log", "config_filename": "zookeeper_2188.properties" }, { "entity_id": "1", "port": "9091", "broker.id": "1", "log.segment.bytes": "102400", "log.retention.size": "1048576", "log.dir": 
"/tmp/kafka_server_1_logs", "default.replication.factor": "3", "num.partitions": "2", "log_filename": "kafka_server_9091.log", "config_filename": "kafka_server_9091.properties" }, { "entity_id": "2", "port": "9092", "broker.id": "2", "log.segment.bytes": "102400", "log.retention.size": "1048576", "log.dir": "/tmp/kafka_server_2_logs", ======================================================================= ==./system_test/replication_testsuite/testcase_4011/cluster_config.json ======================================================================= { "cluster_config": [ { "entity_id": "0", "hostname": "localhost", "role": "zookeeper", "cluster_name": "source", "kafka_home": "default", "java_home": "default", "jmx_port": "9990" }, { "entity_id": "1", "hostname": "localhost", "role": "broker", "cluster_name": "source", "kafka_home": "default", "java_home": "default", "jmx_port": "9991" }, { "entity_id": "2", "hostname": "localhost", "role": "broker", "cluster_name": "source", "kafka_home": "default", "java_home": "default", "jmx_port": "9992" }, { "entity_id": "3", "hostname": "localhost", "role": "broker", "cluster_name": "source", "kafka_home": "default", "java_home": "default", "jmx_port": "9993" }, { "entity_id": "4", "hostname": "localhost", "role": "producer_performance", "cluster_name": "source", "kafka_home": "default", "java_home": "default", "jmx_port": "9997" }, { "entity_id": "5", "hostname": "localhost", ======================================================================= ==./system_test/replication_testsuite/testcase_4011/testcase_4011_properties.json ======================================================================= { "description": {"01":"Broker Log Retention (Replica Factor < No. 
of Brokers) : Base Test", "02":"Produce and consume messages to 2 topics - 2 partitions", "03":"This test sends messages to 2 replicas", "04":"To trigger leader election: find the leader and terminate by controlled failure (kill -15)", "05":"Restart the terminated broker", "06":"Lookup brokers' log4j messages and verify that leader is re-elected successfully", "07":"At the end it verifies the log size and contents", "08":"Use a consumer to verify no message loss.", "09":"Producer dimensions : mode:sync, acks:-1, comp:0", "10":"Log segment size : 102400" }, "testcase_args": { "broker_type": "leader", "bounce_broker": "true", "replica_factor": "2", "num_partition": "2", "num_iteration": "1", "sleep_seconds_between_producer_calls": "1", "broker_down_time_in_sec": "5", "message_producing_free_time_sec": "15", "log_retention_test": "true" }, "entities": [ { "entity_id": "0", "clientPort": "2188", "dataDir": "/tmp/zookeeper_0", "log_filename": "zookeeper_2188.log", "config_filename": "zookeeper_2188.properties" }, { "entity_id": "1", "port": "9091", "broker.id": "1", "log.segment.bytes": "102400", "log.retention.size": "1048576", "log.dir": "/tmp/kafka_server_1_logs", "default.replication.factor": "2", "num.partitions": "2", "log_filename": "kafka_server_9091.log", "config_filename": "kafka_server_9091.properties" }, { "entity_id": "2", "port": "9092", "broker.id": "2", "log.segment.bytes": "102400", "log.retention.size": "1048576", "log.dir": "/tmp/kafka_server_2_logs", ======================================================================= ==./system_test/replication_testsuite/testcase_4012/cluster_config.json ======================================================================= { "cluster_config": [ { "entity_id": "0", "hostname": "localhost", "role": "zookeeper", "cluster_name": "source", "kafka_home": "default", "java_home": "default", "jmx_port": "9990" }, { "entity_id": "1", "hostname": "localhost", "role": "broker", "cluster_name": "source", "kafka_home": 
"default", "java_home": "default", "jmx_port": "9991" }, { "entity_id": "2", "hostname": "localhost", "role": "broker", "cluster_name": "source", "kafka_home": "default", "java_home": "default", "jmx_port": "9992" }, { "entity_id": "3", "hostname": "localhost", "role": "broker", "cluster_name": "source", "kafka_home": "default", "java_home": "default", "jmx_port": "9993" }, { "entity_id": "4", "hostname": "localhost", "role": "producer_performance", "cluster_name": "source", "kafka_home": "default", "java_home": "default", "jmx_port": "9997" }, { "entity_id": "5", "hostname": "localhost", ======================================================================= ==./system_test/replication_testsuite/testcase_4012/testcase_4012_properties.json ======================================================================= { "description": {"01":"Broker Log Retention (Replica Factor < No. of Brokers) : 1. acks => 1", "02":"Produce and consume messages to 2 topics - 2 partitions", "03":"This test sends messages to 2 replicas", "04":"To trigger leader election: find the leader and terminate by controlled failure (kill -15)", "05":"Restart the terminated broker", "06":"Lookup brokers' log4j messages and verify that leader is re-elected successfully", "07":"At the end it verifies the log size and contents", "08":"Use a consumer to verify no message loss.", "09":"Producer dimensions : mode:sync, acks:1, comp:0", "10":"Log segment size : 102400" }, "testcase_args": { "broker_type": "leader", "bounce_broker": "true", "replica_factor": "2", "num_partition": "2", "num_iteration": "1", "sleep_seconds_between_producer_calls": "1", "broker_down_time_in_sec": "5", "message_producing_free_time_sec": "15", "log_retention_test": "true" }, "entities": [ { "entity_id": "0", "clientPort": "2188", "dataDir": "/tmp/zookeeper_0", "log_filename": "zookeeper_2188.log", "config_filename": "zookeeper_2188.properties" }, { "entity_id": "1", "port": "9091", "broker.id": "1", "log.segment.bytes": "102400", 
"log.retention.size": "1048576", "log.dir": "/tmp/kafka_server_1_logs", "default.replication.factor": "2", "num.partitions": "2", "log_filename": "kafka_server_9091.log", "config_filename": "kafka_server_9091.properties" }, { "entity_id": "2", "port": "9092", "broker.id": "2", "log.segment.bytes": "102400", "log.retention.size": "1048576", "log.dir": "/tmp/kafka_server_2_logs", ======================================================================= ==./system_test/replication_testsuite/testcase_4013/cluster_config.json ======================================================================= { "cluster_config": [ { "entity_id": "0", "hostname": "localhost", "role": "zookeeper", "cluster_name": "source", "kafka_home": "default", "java_home": "default", "jmx_port": "9990" }, { "entity_id": "1", "hostname": "localhost", "role": "broker", "cluster_name": "source", "kafka_home": "default", "java_home": "default", "jmx_port": "9991" }, { "entity_id": "2", "hostname": "localhost", "role": "broker", "cluster_name": "source", "kafka_home": "default", "java_home": "default", "jmx_port": "9992" }, { "entity_id": "3", "hostname": "localhost", "role": "broker", "cluster_name": "source", "kafka_home": "default", "java_home": "default", "jmx_port": "9993" }, { "entity_id": "4", "hostname": "localhost", "role": "producer_performance", "cluster_name": "source", "kafka_home": "default", "java_home": "default", "jmx_port": "9997" }, { "entity_id": "5", "hostname": "localhost", ======================================================================= ==./system_test/replication_testsuite/testcase_4013/testcase_4013_properties.json ======================================================================= { "description": {"01":"Broker Log Retention (Replica Factor < No. of Brokers) : 1. 
acks => -1, comp => 1", "02":"Produce and consume messages to 2 topics - 2 partitions", "03":"This test sends messages to 2 replicas", "04":"To trigger leader election: find the leader and terminate by controlled failure (kill -15)", "05":"Restart the terminated broker", "06":"Lookup brokers' log4j messages and verify that leader is re-elected successfully", "07":"At the end it verifies the log size and contents", "08":"Use a consumer to verify no message loss.", "09":"Producer dimensions : mode:sync, acks:-1, comp:1", "10":"Log segment size : 102400" }, "testcase_args": { "broker_type": "leader", "bounce_broker": "true", "replica_factor": "2", "num_partition": "2", "num_iteration": "1", "sleep_seconds_between_producer_calls": "1", "broker_down_time_in_sec": "5", "message_producing_free_time_sec": "15", "log_retention_test": "true" }, "entities": [ { "entity_id": "0", "clientPort": "2188", "dataDir": "/tmp/zookeeper_0", "log_filename": "zookeeper_2188.log", "config_filename": "zookeeper_2188.properties" }, { "entity_id": "1", "port": "9091", "broker.id": "1", "log.segment.bytes": "102400", "log.retention.size": "1048576", "log.dir": "/tmp/kafka_server_1_logs", "default.replication.factor": "2", "num.partitions": "2", "log_filename": "kafka_server_9091.log", "config_filename": "kafka_server_9091.properties" }, { "entity_id": "2", "port": "9092", "broker.id": "2", "log.segment.bytes": "102400", "log.retention.size": "1048576", "log.dir": "/tmp/kafka_server_2_logs", ======================================================================= ==./system_test/replication_testsuite/testcase_4014/cluster_config.json ======================================================================= { "cluster_config": [ { "entity_id": "0", "hostname": "localhost", "role": "zookeeper", "cluster_name": "source", "kafka_home": "default", "java_home": "default", "jmx_port": "9990" }, { "entity_id": "1", "hostname": "localhost", "role": "broker", "cluster_name": "source", "kafka_home": 
"default", "java_home": "default", "jmx_port": "9991" }, { "entity_id": "2", "hostname": "localhost", "role": "broker", "cluster_name": "source", "kafka_home": "default", "java_home": "default", "jmx_port": "9992" }, { "entity_id": "3", "hostname": "localhost", "role": "broker", "cluster_name": "source", "kafka_home": "default", "java_home": "default", "jmx_port": "9993" }, { "entity_id": "4", "hostname": "localhost", "role": "producer_performance", "cluster_name": "source", "kafka_home": "default", "java_home": "default", "jmx_port": "9997" }, { "entity_id": "5", "hostname": "localhost", ======================================================================= ==./system_test/replication_testsuite/testcase_4014/testcase_4014_properties.json ======================================================================= { "description": {"01":"Broker Log Retention (Replica Factor < No. of Brokers) : 1. acks => 1", "02":"Produce and consume messages to 2 topics - 2 partitions", "03":"This test sends messages to 2 replicas", "04":"To trigger leader election: find the leader and terminate by controlled failure (kill -15)", "05":"Restart the terminated broker", "06":"Lookup brokers' log4j messages and verify that leader is re-elected successfully", "07":"At the end it verifies the log size and contents", "08":"Use a consumer to verify no message loss.", "09":"Producer dimensions : mode:sync, acks:1, comp:1", "10":"Log segment size : 102400" }, "testcase_args": { "broker_type": "leader", "bounce_broker": "true", "replica_factor": "2", "num_partition": "2", "num_iteration": "1", "sleep_seconds_between_producer_calls": "1", "broker_down_time_in_sec": "5", "message_producing_free_time_sec": "15", "log_retention_test": "true" }, "entities": [ { "entity_id": "0", "clientPort": "2188", "dataDir": "/tmp/zookeeper_0", "log_filename": "zookeeper_2188.log", "config_filename": "zookeeper_2188.properties" }, { "entity_id": "1", "port": "9091", "broker.id": "1", "log.segment.bytes": "102400", 
"log.retention.size": "1048576", "log.dir": "/tmp/kafka_server_1_logs", "default.replication.factor": "2", "num.partitions": "2", "log_filename": "kafka_server_9091.log", "config_filename": "kafka_server_9091.properties" }, { "entity_id": "2", "port": "9092", "broker.id": "2", "log.segment.bytes": "102400", "log.retention.size": "1048576", "log.dir": "/tmp/kafka_server_2_logs", ======================================================================= ==./system_test/replication_testsuite/testcase_4015/cluster_config.json ======================================================================= { "cluster_config": [ { "entity_id": "0", "hostname": "localhost", "role": "zookeeper", "cluster_name": "source", "kafka_home": "default", "java_home": "default", "jmx_port": "9990" }, { "entity_id": "1", "hostname": "localhost", "role": "broker", "cluster_name": "source", "kafka_home": "default", "java_home": "default", "jmx_port": "9991" }, { "entity_id": "2", "hostname": "localhost", "role": "broker", "cluster_name": "source", "kafka_home": "default", "java_home": "default", "jmx_port": "9992" }, { "entity_id": "3", "hostname": "localhost", "role": "broker", "cluster_name": "source", "kafka_home": "default", "java_home": "default", "jmx_port": "9993" }, { "entity_id": "4", "hostname": "localhost", "role": "producer_performance", "cluster_name": "source", "kafka_home": "default", "java_home": "default", "jmx_port": "9997" }, { "entity_id": "5", "hostname": "localhost", ======================================================================= ==./system_test/replication_testsuite/testcase_4015/testcase_4015_properties.json ======================================================================= { "description": {"01":"Broker Log Retention (Replica Factor < No. of Brokers) : 1. 
sync => false, acks => -1, comp => 0", "02":"Produce and consume messages to 2 topics - 2 partitions", "03":"This test sends messages to 2 replicas", "04":"To trigger leader election: find the leader and terminate by controlled failure (kill -15)", "05":"Restart the terminated broker", "06":"Lookup brokers' log4j messages and verify that leader is re-elected successfully", "07":"At the end it verifies the log size and contents", "08":"Use a consumer to verify no message loss.", "09":"Producer dimensions : mode:async, acks:-1, comp:0", "10":"Log segment size : 102400" }, "testcase_args": { "broker_type": "leader", "bounce_broker": "true", "replica_factor": "2", "num_partition": "2", "num_iteration": "1", "sleep_seconds_between_producer_calls": "1", "broker_down_time_in_sec": "5", "message_producing_free_time_sec": "15", "log_retention_test": "true" }, "entities": [ { "entity_id": "0", "clientPort": "2188", "dataDir": "/tmp/zookeeper_0", "log_filename": "zookeeper_2188.log", "config_filename": "zookeeper_2188.properties" }, { "entity_id": "1", "port": "9091", "broker.id": "1", "log.segment.bytes": "102400", "log.retention.size": "1048576", "log.dir": "/tmp/kafka_server_1_logs", "default.replication.factor": "2", "num.partitions": "2", "log_filename": "kafka_server_9091.log", "config_filename": "kafka_server_9091.properties" }, { "entity_id": "2", "port": "9092", "broker.id": "2", "log.segment.bytes": "102400", "log.retention.size": "1048576", "log.dir": "/tmp/kafka_server_2_logs", ======================================================================= ==./system_test/replication_testsuite/testcase_4016/cluster_config.json ======================================================================= { "cluster_config": [ { "entity_id": "0", "hostname": "localhost", "role": "zookeeper", "cluster_name": "source", "kafka_home": "default", "java_home": "default", "jmx_port": "9990" }, { "entity_id": "1", "hostname": "localhost", "role": "broker", "cluster_name": "source", 
"kafka_home": "default", "java_home": "default", "jmx_port": "9991" }, { "entity_id": "2", "hostname": "localhost", "role": "broker", "cluster_name": "source", "kafka_home": "default", "java_home": "default", "jmx_port": "9992" }, { "entity_id": "3", "hostname": "localhost", "role": "broker", "cluster_name": "source", "kafka_home": "default", "java_home": "default", "jmx_port": "9993" }, { "entity_id": "4", "hostname": "localhost", "role": "producer_performance", "cluster_name": "source", "kafka_home": "default", "java_home": "default", "jmx_port": "9997" }, { "entity_id": "5", "hostname": "localhost", ======================================================================= ==./system_test/replication_testsuite/testcase_4016/testcase_4016_properties.json ======================================================================= { "description": {"01":"Broker Log Retention (Replica Factor < No. of Brokers) : 1. acks => 1", "02":"Produce and consume messages to 2 topics - 2 partitions", "03":"This test sends messages to 2 replicas", "04":"To trigger leader election: find the leader and terminate by controlled failure (kill -15)", "05":"Restart the terminated broker", "06":"Lookup brokers' log4j messages and verify that leader is re-elected successfully", "07":"At the end it verifies the log size and contents", "08":"Use a consumer to verify no message loss.", "09":"Producer dimensions : mode:async, acks:1, comp:0", "10":"Log segment size : 102400" }, "testcase_args": { "broker_type": "leader", "bounce_broker": "true", "replica_factor": "2", "num_partition": "2", "num_iteration": "1", "sleep_seconds_between_producer_calls": "1", "broker_down_time_in_sec": "5", "message_producing_free_time_sec": "15", "log_retention_test": "true" }, "entities": [ { "entity_id": "0", "clientPort": "2188", "dataDir": "/tmp/zookeeper_0", "log_filename": "zookeeper_2188.log", "config_filename": "zookeeper_2188.properties" }, { "entity_id": "1", "port": "9091", "broker.id": "1", 
"log.segment.bytes": "102400", "log.retention.size": "1048576", "log.dir": "/tmp/kafka_server_1_logs", "default.replication.factor": "2", "num.partitions": "2", "log_filename": "kafka_server_9091.log", "config_filename": "kafka_server_9091.properties" }, { "entity_id": "2", "port": "9092", "broker.id": "2", "log.segment.bytes": "102400", "log.retention.size": "1048576", "log.dir": "/tmp/kafka_server_2_logs", ======================================================================= ==./system_test/replication_testsuite/testcase_4017/cluster_config.json ======================================================================= { "cluster_config": [ { "entity_id": "0", "hostname": "localhost", "role": "zookeeper", "cluster_name": "source", "kafka_home": "default", "java_home": "default", "jmx_port": "9990" }, { "entity_id": "1", "hostname": "localhost", "role": "broker", "cluster_name": "source", "kafka_home": "default", "java_home": "default", "jmx_port": "9991" }, { "entity_id": "2", "hostname": "localhost", "role": "broker", "cluster_name": "source", "kafka_home": "default", "java_home": "default", "jmx_port": "9992" }, { "entity_id": "3", "hostname": "localhost", "role": "broker", "cluster_name": "source", "kafka_home": "default", "java_home": "default", "jmx_port": "9993" }, { "entity_id": "4", "hostname": "localhost", "role": "producer_performance", "cluster_name": "source", "kafka_home": "default", "java_home": "default", "jmx_port": "9997" }, { "entity_id": "5", "hostname": "localhost", ======================================================================= ==./system_test/replication_testsuite/testcase_4017/testcase_4017_properties.json ======================================================================= { "description": {"01":"Broker Log Retention (Replica Factor < No. of Brokers) : 1. acks => -1, 2. 
comp => 1", "02":"Produce and consume messages to 2 topics - 2 partitions", "03":"This test sends messages to 2 replicas", "04":"To trigger leader election: find the leader and terminate by controlled failure (kill -15)", "05":"Restart the terminated broker", "06":"Lookup brokers' log4j messages and verify that leader is re-elected successfully", "07":"At the end it verifies the log size and contents", "08":"Use a consumer to verify no message loss.", "09":"Producer dimensions : mode:async, acks:-1, comp:1", "10":"Log segment size : 102400" }, "testcase_args": { "broker_type": "leader", "bounce_broker": "true", "replica_factor": "2", "num_partition": "2", "num_iteration": "1", "sleep_seconds_between_producer_calls": "1", "broker_down_time_in_sec": "5", "message_producing_free_time_sec": "15", "log_retention_test": "true" }, "entities": [ { "entity_id": "0", "clientPort": "2188", "dataDir": "/tmp/zookeeper_0", "log_filename": "zookeeper_2188.log", "config_filename": "zookeeper_2188.properties" }, { "entity_id": "1", "port": "9091", "broker.id": "1", "log.segment.bytes": "102400", "log.retention.size": "1048576", "log.dir": "/tmp/kafka_server_1_logs", "default.replication.factor": "2", "num.partitions": "2", "log_filename": "kafka_server_9091.log", "config_filename": "kafka_server_9091.properties" }, { "entity_id": "2", "port": "9092", "broker.id": "2", "log.segment.bytes": "102400", "log.retention.size": "1048576", "log.dir": "/tmp/kafka_server_2_logs", ======================================================================= ==./system_test/replication_testsuite/testcase_4018/cluster_config.json ======================================================================= { "cluster_config": [ { "entity_id": "0", "hostname": "localhost", "role": "zookeeper", "cluster_name": "source", "kafka_home": "default", "java_home": "default", "jmx_port": "9990" }, { "entity_id": "1", "hostname": "localhost", "role": "broker", "cluster_name": "source", "kafka_home": "default", 
"java_home": "default", "jmx_port": "9991" }, { "entity_id": "2", "hostname": "localhost", "role": "broker", "cluster_name": "source", "kafka_home": "default", "java_home": "default", "jmx_port": "9992" }, { "entity_id": "3", "hostname": "localhost", "role": "broker", "cluster_name": "source", "kafka_home": "default", "java_home": "default", "jmx_port": "9993" }, { "entity_id": "4", "hostname": "localhost", "role": "producer_performance", "cluster_name": "source", "kafka_home": "default", "java_home": "default", "jmx_port": "9997" }, { "entity_id": "5", "hostname": "localhost", ======================================================================= ==./system_test/replication_testsuite/testcase_4018/testcase_4018_properties.json ======================================================================= { "description": {"01":"Broker Log Retention (Replica Factor < No. of Brokers) : 1. acks => 1", "02":"Produce and consume messages to 2 topics - 2 partitions", "03":"This test sends messages to 2 replicas", "04":"To trigger leader election: find the leader and terminate by controlled failure (kill -15)", "05":"Restart the terminated broker", "06":"Lookup brokers' log4j messages and verify that leader is re-elected successfully", "07":"At the end it verifies the log size and contents", "08":"Use a consumer to verify no message loss.", "09":"Producer dimensions : mode:async, acks:1, comp:1", "10":"Log segment size : 102400" }, "testcase_args": { "broker_type": "leader", "bounce_broker": "true", "replica_factor": "2", "num_partition": "2", "num_iteration": "1", "sleep_seconds_between_producer_calls": "1", "broker_down_time_in_sec": "5", "message_producing_free_time_sec": "15", "log_retention_test": "true" }, "entities": [ { "entity_id": "0", "clientPort": "2188", "dataDir": "/tmp/zookeeper_0", "log_filename": "zookeeper_2188.log", "config_filename": "zookeeper_2188.properties" }, { "entity_id": "1", "port": "9091", "broker.id": "1", "log.segment.bytes": "102400", 
"log.retention.size": "1048576", "log.dir": "/tmp/kafka_server_1_logs", "default.replication.factor": "2", "num.partitions": "2", "log_filename": "kafka_server_9091.log", "config_filename": "kafka_server_9091.properties" }, { "entity_id": "2", "port": "9092", "broker.id": "2", "log.segment.bytes": "102400", "log.retention.size": "1048576", "log.dir": "/tmp/kafka_server_2_logs", ======================================================================= ==./system_test/replication_testsuite/testcase_9051/cluster_config.json ======================================================================= { "cluster_config": [ { "entity_id": "0", "hostname": "localhost", "role": "zookeeper", "cluster_name": "source", "kafka_home": "default", "java_home": "default", "jmx_port": "9990" }, { "entity_id": "1", "hostname": "localhost", "role": "broker", "cluster_name": "source", "kafka_home": "default", "java_home": "default", "jmx_port": "9991" }, { "entity_id": "2", "hostname": "localhost", "role": "broker", "cluster_name": "source", "kafka_home": "default", "java_home": "default", "jmx_port": "9992" }, { "entity_id": "3", "hostname": "localhost", "role": "broker", "cluster_name": "source", "kafka_home": "default", "java_home": "default", "jmx_port": "9993" }, { "entity_id": "4", "hostname": "localhost", "role": "producer_performance", "cluster_name": "source", "kafka_home": "default", "java_home": "default", "jmx_port": "9997" }, { "entity_id": "5", "hostname": "localhost", ======================================================================= ==./system_test/replication_testsuite/testcase_9051/testcase_9051_properties.json ======================================================================= { "description": {"01":"To Test : 'Leader Failure in Replication'", "02":"Produce and consume messages to 300 topics - 4 partitions.", "03":"This test sends messages to 3 replicas", "04":"To trigger leader election: find the leader and terminate by controlled failure (kill -15)", 
"05":"Restart the terminated broker", "06":"Lookup brokers' log4j messages and verify that leader is re-elected successfully", "07":"At the end it verifies the log size and contents", "08":"Use a consumer to verify no message loss.", "09":"Producer dimensions : mode:sync, acks:-1, comp:0", "10":"Log segment size : 1048576" }, "testcase_args": { "broker_type": "leader", "bounce_broker": "false", "replica_factor": "3", "num_partition": "2", "num_iteration": "1", "producer_multi_topics_mode": "true", "consumer_multi_topics_mode": "true", "sleep_seconds_between_producer_calls": "5", "message_producing_free_time_sec": "15", "num_topics_for_auto_generated_string": "20", "num_messages_to_produce_per_producer_call": "50" }, "entities": [ { "entity_id": "0", "clientPort": "2188", "dataDir": "/tmp/zookeeper_0", "log_filename": "zookeeper_2188.log", "config_filename": "zookeeper_2188.properties" }, { "entity_id": "1", "port": "9091", "broker.id": "1", "log.segment.bytes": "1048576", "log.dir": "/tmp/kafka_server_1_logs", "log_filename": "kafka_server_9091.log", "config_filename": "kafka_server_9091.properties" }, { "entity_id": "2", "port": "9092", "broker.id": "2", "log.segment.bytes": "1048576", "log.dir": "/tmp/kafka_server_2_logs", "log_filename": "kafka_server_9092.log", "config_filename": "kafka_server_9092.properties" ======================================================================= ==./system_test/utils/__init__.py =======================================================================