From b0003315f26d8e9d712e9ec43883e60c9169e317 Mon Sep 17 00:00:00 2001 From: Ewen Cheslack-Postava Date: Thu, 16 Oct 2014 18:17:15 -0700 Subject: [PATCH] KAFKA-1725: Clean up system test output: fix typo in system test case file, incorrectly named system test configuration files, and skip trying to generate metrics graphs when no data is available. --- .../testcase_15001/testcase_15001_properties.json | 158 ++++++++++++++++++ .../testcase_15001/testcase_5001_properties.json | 158 ------------------ .../testcase_15002/testcase_15002_properties.json | 158 ++++++++++++++++++ .../testcase_15002/testcase_5002_properties.json | 158 ------------------ .../testcase_15003/testcase_15003_properties.json | 156 ++++++++++++++++++ .../testcase_15003/testcase_5003_properties.json | 156 ------------------ .../testcase_15004/testcase_15004_properties.json | 156 ++++++++++++++++++ .../testcase_15004/testcase_5004_properties.json | 156 ------------------ .../testcase_15005/testcase_15005_properties.json | 178 +++++++++++++++++++++ .../testcase_15005/testcase_5005_properties.json | 178 --------------------- .../testcase_15006/testcase_15006_properties.json | 178 +++++++++++++++++++++ .../testcase_15006/testcase_5006_properties.json | 178 --------------------- .../testcase_0001/testcase_0001_properties.json | 2 +- .../testcase_10101/testcase_0101_properties.json | 86 ---------- .../testcase_10101/testcase_10101_properties.json | 86 ++++++++++ .../testcase_10102/testcase_0102_properties.json | 86 ---------- .../testcase_10102/testcase_10102_properties.json | 86 ++++++++++ .../testcase_10103/testcase_0103_properties.json | 86 ---------- .../testcase_10103/testcase_10103_properties.json | 86 ++++++++++ .../testcase_10104/testcase_0104_properties.json | 86 ---------- .../testcase_10104/testcase_10104_properties.json | 86 ++++++++++ .../testcase_10105/testcase_0105_properties.json | 86 ---------- .../testcase_10105/testcase_10105_properties.json | 86 ++++++++++ 
.../testcase_10106/testcase_0106_properties.json | 86 ---------- .../testcase_10106/testcase_10106_properties.json | 86 ++++++++++ .../testcase_10107/testcase_0107_properties.json | 86 ---------- .../testcase_10107/testcase_10107_properties.json | 86 ++++++++++ .../testcase_10108/testcase_0108_properties.json | 86 ---------- .../testcase_10108/testcase_10108_properties.json | 86 ++++++++++ .../testcase_10109/testcase_0109_properties.json | 86 ---------- .../testcase_10109/testcase_10109_properties.json | 86 ++++++++++ .../testcase_10110/testcase_0110_properties.json | 86 ---------- .../testcase_10110/testcase_10110_properties.json | 86 ++++++++++ .../testcase_10131/testcase_0131_properties.json | 110 ------------- .../testcase_10131/testcase_10131_properties.json | 110 +++++++++++++ .../testcase_10132/testcase_0132_properties.json | 107 ------------- .../testcase_10132/testcase_10132_properties.json | 107 +++++++++++++ .../testcase_10133/testcase_0133_properties.json | 107 ------------- .../testcase_10133/testcase_10133_properties.json | 107 +++++++++++++ .../testcase_10134/testcase_0134_properties.json | 92 ----------- .../testcase_10134/testcase_10134_properties.json | 92 +++++++++++ system_test/utils/metrics.py | 2 + 42 files changed, 2263 insertions(+), 2261 deletions(-) create mode 100644 system_test/mirror_maker_testsuite/testcase_15001/testcase_15001_properties.json delete mode 100644 system_test/mirror_maker_testsuite/testcase_15001/testcase_5001_properties.json create mode 100644 system_test/mirror_maker_testsuite/testcase_15002/testcase_15002_properties.json delete mode 100644 system_test/mirror_maker_testsuite/testcase_15002/testcase_5002_properties.json create mode 100644 system_test/mirror_maker_testsuite/testcase_15003/testcase_15003_properties.json delete mode 100644 system_test/mirror_maker_testsuite/testcase_15003/testcase_5003_properties.json create mode 100644 system_test/mirror_maker_testsuite/testcase_15004/testcase_15004_properties.json delete 
mode 100644 system_test/mirror_maker_testsuite/testcase_15004/testcase_5004_properties.json create mode 100644 system_test/mirror_maker_testsuite/testcase_15005/testcase_15005_properties.json delete mode 100644 system_test/mirror_maker_testsuite/testcase_15005/testcase_5005_properties.json create mode 100644 system_test/mirror_maker_testsuite/testcase_15006/testcase_15006_properties.json delete mode 100644 system_test/mirror_maker_testsuite/testcase_15006/testcase_5006_properties.json delete mode 100644 system_test/replication_testsuite/testcase_10101/testcase_0101_properties.json create mode 100644 system_test/replication_testsuite/testcase_10101/testcase_10101_properties.json delete mode 100644 system_test/replication_testsuite/testcase_10102/testcase_0102_properties.json create mode 100644 system_test/replication_testsuite/testcase_10102/testcase_10102_properties.json delete mode 100644 system_test/replication_testsuite/testcase_10103/testcase_0103_properties.json create mode 100644 system_test/replication_testsuite/testcase_10103/testcase_10103_properties.json delete mode 100644 system_test/replication_testsuite/testcase_10104/testcase_0104_properties.json create mode 100644 system_test/replication_testsuite/testcase_10104/testcase_10104_properties.json delete mode 100644 system_test/replication_testsuite/testcase_10105/testcase_0105_properties.json create mode 100644 system_test/replication_testsuite/testcase_10105/testcase_10105_properties.json delete mode 100644 system_test/replication_testsuite/testcase_10106/testcase_0106_properties.json create mode 100644 system_test/replication_testsuite/testcase_10106/testcase_10106_properties.json delete mode 100644 system_test/replication_testsuite/testcase_10107/testcase_0107_properties.json create mode 100644 system_test/replication_testsuite/testcase_10107/testcase_10107_properties.json delete mode 100644 system_test/replication_testsuite/testcase_10108/testcase_0108_properties.json create mode 100644 
system_test/replication_testsuite/testcase_10108/testcase_10108_properties.json delete mode 100644 system_test/replication_testsuite/testcase_10109/testcase_0109_properties.json create mode 100644 system_test/replication_testsuite/testcase_10109/testcase_10109_properties.json delete mode 100644 system_test/replication_testsuite/testcase_10110/testcase_0110_properties.json create mode 100644 system_test/replication_testsuite/testcase_10110/testcase_10110_properties.json delete mode 100644 system_test/replication_testsuite/testcase_10131/testcase_0131_properties.json create mode 100644 system_test/replication_testsuite/testcase_10131/testcase_10131_properties.json delete mode 100644 system_test/replication_testsuite/testcase_10132/testcase_0132_properties.json create mode 100644 system_test/replication_testsuite/testcase_10132/testcase_10132_properties.json delete mode 100644 system_test/replication_testsuite/testcase_10133/testcase_0133_properties.json create mode 100644 system_test/replication_testsuite/testcase_10133/testcase_10133_properties.json delete mode 100644 system_test/replication_testsuite/testcase_10134/testcase_0134_properties.json create mode 100644 system_test/replication_testsuite/testcase_10134/testcase_10134_properties.json diff --git a/system_test/mirror_maker_testsuite/testcase_15001/testcase_15001_properties.json b/system_test/mirror_maker_testsuite/testcase_15001/testcase_15001_properties.json new file mode 100644 index 0000000..9dd3477 --- /dev/null +++ b/system_test/mirror_maker_testsuite/testcase_15001/testcase_15001_properties.json @@ -0,0 +1,158 @@ +{ + "description": {"01":"To Test : 'Replication with Mirror Maker'", + "02":"Set up 2 clusters such as : SOURCE => MirrorMaker => TARGET", + "03":"Set up 2-node Zk cluster for both SOURCE & TARGET", + "04":"Produce and consume messages to a single topic - single partition.", + "05":"This test sends messages to 3 replicas", + "06":"At the end it verifies the log size and contents", + "07":"Use 
a consumer to verify no message loss in TARGET cluster.", + "08":"Producer dimensions : mode:sync, acks:-1, comp:0", + "09":"Log segment size : 10240" + }, + "testcase_args": { + "bounce_leader": "false", + "bounce_mirror_maker": "false", + "replica_factor": "3", + "num_partition": "1", + "num_iteration": "1", + "sleep_seconds_between_producer_calls": "1", + "message_producing_free_time_sec": "15", + "num_messages_to_produce_per_producer_call": "50" + }, + "entities": [ + { + "entity_id": "0", + "clientPort": "2108", + "dataDir": "/tmp/zookeeper_0", + "log_filename": "zookeeper_0.log", + "config_filename": "zookeeper_0.properties" + }, + { + "entity_id": "1", + "clientPort": "2118", + "dataDir": "/tmp/zookeeper_1", + "log_filename": "zookeeper_1.log", + "config_filename": "zookeeper_1.properties" + }, + + { + "entity_id": "2", + "clientPort": "2128", + "dataDir": "/tmp/zookeeper_2", + "log_filename": "zookeeper_2.log", + "config_filename": "zookeeper_2.properties" + }, + { + "entity_id": "3", + "clientPort": "2138", + "dataDir": "/tmp/zookeeper_3", + "log_filename": "zookeeper_3.log", + "config_filename": "zookeeper_3.properties" + }, + + { + "entity_id": "4", + "port": "9091", + "broker.id": "1", + "log.segment.bytes": "20480", + "log.dir": "/tmp/kafka_server_4_logs", + "default.replication.factor": "3", + "num.partitions": "5", + "log_filename": "kafka_server_4.log", + "config_filename": "kafka_server_4.properties" + }, + { + "entity_id": "5", + "port": "9092", + "broker.id": "2", + "log.segment.bytes": "20480", + "log.dir": "/tmp/kafka_server_5_logs", + "default.replication.factor": "3", + "num.partitions": "5", + "log_filename": "kafka_server_5.log", + "config_filename": "kafka_server_5.properties" + }, + { + "entity_id": "6", + "port": "9093", + "broker.id": "3", + "log.segment.bytes": "20480", + "log.dir": "/tmp/kafka_server_6_logs", + "default.replication.factor": "3", + "num.partitions": "5", + "log_filename": "kafka_server_6.log", + "config_filename": 
"kafka_server_6.properties" + }, + { + "entity_id": "7", + "port": "9094", + "broker.id": "4", + "log.segment.bytes": "20480", + "log.dir": "/tmp/kafka_server_7_logs", + "default.replication.factor": "3", + "num.partitions": "5", + "log_filename": "kafka_server_7.log", + "config_filename": "kafka_server_7.properties" + }, + { + "entity_id": "8", + "port": "9095", + "broker.id": "5", + "log.segment.bytes": "20480", + "log.dir": "/tmp/kafka_server_8_logs", + "default.replication.factor": "3", + "num.partitions": "5", + "log_filename": "kafka_server_8.log", + "config_filename": "kafka_server_8.properties" + }, + { + "entity_id": "9", + "port": "9096", + "broker.id": "6", + "log.segment.bytes": "20480", + "log.dir": "/tmp/kafka_server_9_logs", + "default.replication.factor": "3", + "num.partitions": "5", + "log_filename": "kafka_server_9.log", + "config_filename": "kafka_server_9.properties" + }, + + { + "entity_id": "10", + "topic": "test_1", + "threads": "5", + "compression-codec": "0", + "message-size": "500", + "message": "500", + "request-num-acks": "-1", + "sync":"true", + "producer-num-retries":"5", + "log_filename": "producer_performance_10.log", + "config_filename": "producer_performance_10.properties" + }, + { + "entity_id": "11", + "topic": "test_1", + "group.id": "mytestgroup", + "consumer-timeout-ms": "10000", + "log_filename": "console_consumer_11.log", + "config_filename": "console_consumer_11.properties" + }, + + { + "entity_id": "12", + "log_filename": "mirror_maker_12.log", + "mirror_consumer_config_filename": "mirror_consumer_12.properties", + "mirror_producer_config_filename": "mirror_producer_12.properties" + }, + + { + "entity_id": "13", + "topic": "test_1", + "group.id": "mytestgroup", + "consumer-timeout-ms": "10000", + "log_filename": "console_consumer_13.log", + "config_filename": "console_consumer_13.properties" + } + ] +} diff --git a/system_test/mirror_maker_testsuite/testcase_15001/testcase_5001_properties.json 
b/system_test/mirror_maker_testsuite/testcase_15001/testcase_5001_properties.json deleted file mode 100644 index 9dd3477..0000000 --- a/system_test/mirror_maker_testsuite/testcase_15001/testcase_5001_properties.json +++ /dev/null @@ -1,158 +0,0 @@ -{ - "description": {"01":"To Test : 'Replication with Mirror Maker'", - "02":"Set up 2 clusters such as : SOURCE => MirrorMaker => TARGET", - "03":"Set up 2-node Zk cluster for both SOURCE & TARGET", - "04":"Produce and consume messages to a single topic - single partition.", - "05":"This test sends messages to 3 replicas", - "06":"At the end it verifies the log size and contents", - "07":"Use a consumer to verify no message loss in TARGET cluster.", - "08":"Producer dimensions : mode:sync, acks:-1, comp:0", - "09":"Log segment size : 10240" - }, - "testcase_args": { - "bounce_leader": "false", - "bounce_mirror_maker": "false", - "replica_factor": "3", - "num_partition": "1", - "num_iteration": "1", - "sleep_seconds_between_producer_calls": "1", - "message_producing_free_time_sec": "15", - "num_messages_to_produce_per_producer_call": "50" - }, - "entities": [ - { - "entity_id": "0", - "clientPort": "2108", - "dataDir": "/tmp/zookeeper_0", - "log_filename": "zookeeper_0.log", - "config_filename": "zookeeper_0.properties" - }, - { - "entity_id": "1", - "clientPort": "2118", - "dataDir": "/tmp/zookeeper_1", - "log_filename": "zookeeper_1.log", - "config_filename": "zookeeper_1.properties" - }, - - { - "entity_id": "2", - "clientPort": "2128", - "dataDir": "/tmp/zookeeper_2", - "log_filename": "zookeeper_2.log", - "config_filename": "zookeeper_2.properties" - }, - { - "entity_id": "3", - "clientPort": "2138", - "dataDir": "/tmp/zookeeper_3", - "log_filename": "zookeeper_3.log", - "config_filename": "zookeeper_3.properties" - }, - - { - "entity_id": "4", - "port": "9091", - "broker.id": "1", - "log.segment.bytes": "20480", - "log.dir": "/tmp/kafka_server_4_logs", - "default.replication.factor": "3", - "num.partitions": "5", - 
"log_filename": "kafka_server_4.log", - "config_filename": "kafka_server_4.properties" - }, - { - "entity_id": "5", - "port": "9092", - "broker.id": "2", - "log.segment.bytes": "20480", - "log.dir": "/tmp/kafka_server_5_logs", - "default.replication.factor": "3", - "num.partitions": "5", - "log_filename": "kafka_server_5.log", - "config_filename": "kafka_server_5.properties" - }, - { - "entity_id": "6", - "port": "9093", - "broker.id": "3", - "log.segment.bytes": "20480", - "log.dir": "/tmp/kafka_server_6_logs", - "default.replication.factor": "3", - "num.partitions": "5", - "log_filename": "kafka_server_6.log", - "config_filename": "kafka_server_6.properties" - }, - { - "entity_id": "7", - "port": "9094", - "broker.id": "4", - "log.segment.bytes": "20480", - "log.dir": "/tmp/kafka_server_7_logs", - "default.replication.factor": "3", - "num.partitions": "5", - "log_filename": "kafka_server_7.log", - "config_filename": "kafka_server_7.properties" - }, - { - "entity_id": "8", - "port": "9095", - "broker.id": "5", - "log.segment.bytes": "20480", - "log.dir": "/tmp/kafka_server_8_logs", - "default.replication.factor": "3", - "num.partitions": "5", - "log_filename": "kafka_server_8.log", - "config_filename": "kafka_server_8.properties" - }, - { - "entity_id": "9", - "port": "9096", - "broker.id": "6", - "log.segment.bytes": "20480", - "log.dir": "/tmp/kafka_server_9_logs", - "default.replication.factor": "3", - "num.partitions": "5", - "log_filename": "kafka_server_9.log", - "config_filename": "kafka_server_9.properties" - }, - - { - "entity_id": "10", - "topic": "test_1", - "threads": "5", - "compression-codec": "0", - "message-size": "500", - "message": "500", - "request-num-acks": "-1", - "sync":"true", - "producer-num-retries":"5", - "log_filename": "producer_performance_10.log", - "config_filename": "producer_performance_10.properties" - }, - { - "entity_id": "11", - "topic": "test_1", - "group.id": "mytestgroup", - "consumer-timeout-ms": "10000", - "log_filename": 
"console_consumer_11.log", - "config_filename": "console_consumer_11.properties" - }, - - { - "entity_id": "12", - "log_filename": "mirror_maker_12.log", - "mirror_consumer_config_filename": "mirror_consumer_12.properties", - "mirror_producer_config_filename": "mirror_producer_12.properties" - }, - - { - "entity_id": "13", - "topic": "test_1", - "group.id": "mytestgroup", - "consumer-timeout-ms": "10000", - "log_filename": "console_consumer_13.log", - "config_filename": "console_consumer_13.properties" - } - ] -} diff --git a/system_test/mirror_maker_testsuite/testcase_15002/testcase_15002_properties.json b/system_test/mirror_maker_testsuite/testcase_15002/testcase_15002_properties.json new file mode 100644 index 0000000..d6495e5 --- /dev/null +++ b/system_test/mirror_maker_testsuite/testcase_15002/testcase_15002_properties.json @@ -0,0 +1,158 @@ +{ + "description": {"01":"Replication with Mirror Maker => Bounce Mirror Maker", + "02":"Set up 2 clusters such as : SOURCE => MirrorMaker => TARGET", + "03":"Set up 2-node Zk cluster for both SOURCE & TARGET", + "04":"Produce and consume messages to a single topic - single partition.", + "05":"This test sends messages to 3 replicas", + "06":"At the end it verifies the log size and contents", + "07":"Use a consumer to verify no message loss in TARGET cluster.", + "08":"Producer dimensions : mode:sync, acks:-1, comp:0", + "09":"Log segment size : 20480" + }, + "testcase_args": { + "bounce_leader": "false", + "bounce_mirror_maker": "true", + "replica_factor": "3", + "num_partition": "1", + "num_iteration": "1", + "sleep_seconds_between_producer_calls": "1", + "message_producing_free_time_sec": "15", + "num_messages_to_produce_per_producer_call": "50" + }, + "entities": [ + { + "entity_id": "0", + "clientPort": "2108", + "dataDir": "/tmp/zookeeper_0", + "log_filename": "zookeeper_0.log", + "config_filename": "zookeeper_0.properties" + }, + { + "entity_id": "1", + "clientPort": "2118", + "dataDir": "/tmp/zookeeper_1", + 
"log_filename": "zookeeper_1.log", + "config_filename": "zookeeper_1.properties" + }, + + { + "entity_id": "2", + "clientPort": "2128", + "dataDir": "/tmp/zookeeper_2", + "log_filename": "zookeeper_2.log", + "config_filename": "zookeeper_2.properties" + }, + { + "entity_id": "3", + "clientPort": "2138", + "dataDir": "/tmp/zookeeper_3", + "log_filename": "zookeeper_3.log", + "config_filename": "zookeeper_3.properties" + }, + + { + "entity_id": "4", + "port": "9091", + "broker.id": "1", + "log.segment.bytes": "20480", + "log.dir": "/tmp/kafka_server_4_logs", + "default.replication.factor": "3", + "num.partitions": "5", + "log_filename": "kafka_server_4.log", + "config_filename": "kafka_server_4.properties" + }, + { + "entity_id": "5", + "port": "9092", + "broker.id": "2", + "log.segment.bytes": "20480", + "log.dir": "/tmp/kafka_server_5_logs", + "default.replication.factor": "3", + "num.partitions": "5", + "log_filename": "kafka_server_5.log", + "config_filename": "kafka_server_5.properties" + }, + { + "entity_id": "6", + "port": "9093", + "broker.id": "3", + "log.segment.bytes": "20480", + "log.dir": "/tmp/kafka_server_6_logs", + "default.replication.factor": "3", + "num.partitions": "5", + "log_filename": "kafka_server_6.log", + "config_filename": "kafka_server_6.properties" + }, + { + "entity_id": "7", + "port": "9094", + "broker.id": "4", + "log.segment.bytes": "20480", + "log.dir": "/tmp/kafka_server_7_logs", + "default.replication.factor": "3", + "num.partitions": "5", + "log_filename": "kafka_server_7.log", + "config_filename": "kafka_server_7.properties" + }, + { + "entity_id": "8", + "port": "9095", + "broker.id": "5", + "log.segment.bytes": "20480", + "log.dir": "/tmp/kafka_server_8_logs", + "default.replication.factor": "3", + "num.partitions": "5", + "log_filename": "kafka_server_8.log", + "config_filename": "kafka_server_8.properties" + }, + { + "entity_id": "9", + "port": "9096", + "broker.id": "6", + "log.segment.bytes": "20480", + "log.dir": 
"/tmp/kafka_server_9_logs", + "default.replication.factor": "3", + "num.partitions": "5", + "log_filename": "kafka_server_9.log", + "config_filename": "kafka_server_9.properties" + }, + + { + "entity_id": "10", + "topic": "test_1", + "threads": "5", + "compression-codec": "0", + "message-size": "500", + "message": "100", + "request-num-acks": "-1", + "sync":"true", + "producer-num-retries":"5", + "log_filename": "producer_performance_10.log", + "config_filename": "producer_performance_10.properties" + }, + { + "entity_id": "11", + "topic": "test_1", + "group.id": "mytestgroup", + "consumer-timeout-ms": "10000", + "log_filename": "console_consumer_11.log", + "config_filename": "console_consumer_11.properties" + }, + + { + "entity_id": "12", + "log_filename": "mirror_maker_12.log", + "mirror_consumer_config_filename": "mirror_consumer_12.properties", + "mirror_producer_config_filename": "mirror_producer_12.properties" + }, + + { + "entity_id": "13", + "topic": "test_1", + "group.id": "mytestgroup", + "consumer-timeout-ms": "10000", + "log_filename": "console_consumer_13.log", + "config_filename": "console_consumer_13.properties" + } + ] +} diff --git a/system_test/mirror_maker_testsuite/testcase_15002/testcase_5002_properties.json b/system_test/mirror_maker_testsuite/testcase_15002/testcase_5002_properties.json deleted file mode 100644 index d6495e5..0000000 --- a/system_test/mirror_maker_testsuite/testcase_15002/testcase_5002_properties.json +++ /dev/null @@ -1,158 +0,0 @@ -{ - "description": {"01":"Replication with Mirror Maker => Bounce Mirror Maker", - "02":"Set up 2 clusters such as : SOURCE => MirrorMaker => TARGET", - "03":"Set up 2-node Zk cluster for both SOURCE & TARGET", - "04":"Produce and consume messages to a single topic - single partition.", - "05":"This test sends messages to 3 replicas", - "06":"At the end it verifies the log size and contents", - "07":"Use a consumer to verify no message loss in TARGET cluster.", - "08":"Producer dimensions : 
mode:sync, acks:-1, comp:0", - "09":"Log segment size : 20480" - }, - "testcase_args": { - "bounce_leader": "false", - "bounce_mirror_maker": "true", - "replica_factor": "3", - "num_partition": "1", - "num_iteration": "1", - "sleep_seconds_between_producer_calls": "1", - "message_producing_free_time_sec": "15", - "num_messages_to_produce_per_producer_call": "50" - }, - "entities": [ - { - "entity_id": "0", - "clientPort": "2108", - "dataDir": "/tmp/zookeeper_0", - "log_filename": "zookeeper_0.log", - "config_filename": "zookeeper_0.properties" - }, - { - "entity_id": "1", - "clientPort": "2118", - "dataDir": "/tmp/zookeeper_1", - "log_filename": "zookeeper_1.log", - "config_filename": "zookeeper_1.properties" - }, - - { - "entity_id": "2", - "clientPort": "2128", - "dataDir": "/tmp/zookeeper_2", - "log_filename": "zookeeper_2.log", - "config_filename": "zookeeper_2.properties" - }, - { - "entity_id": "3", - "clientPort": "2138", - "dataDir": "/tmp/zookeeper_3", - "log_filename": "zookeeper_3.log", - "config_filename": "zookeeper_3.properties" - }, - - { - "entity_id": "4", - "port": "9091", - "broker.id": "1", - "log.segment.bytes": "20480", - "log.dir": "/tmp/kafka_server_4_logs", - "default.replication.factor": "3", - "num.partitions": "5", - "log_filename": "kafka_server_4.log", - "config_filename": "kafka_server_4.properties" - }, - { - "entity_id": "5", - "port": "9092", - "broker.id": "2", - "log.segment.bytes": "20480", - "log.dir": "/tmp/kafka_server_5_logs", - "default.replication.factor": "3", - "num.partitions": "5", - "log_filename": "kafka_server_5.log", - "config_filename": "kafka_server_5.properties" - }, - { - "entity_id": "6", - "port": "9093", - "broker.id": "3", - "log.segment.bytes": "20480", - "log.dir": "/tmp/kafka_server_6_logs", - "default.replication.factor": "3", - "num.partitions": "5", - "log_filename": "kafka_server_6.log", - "config_filename": "kafka_server_6.properties" - }, - { - "entity_id": "7", - "port": "9094", - "broker.id": 
"4", - "log.segment.bytes": "20480", - "log.dir": "/tmp/kafka_server_7_logs", - "default.replication.factor": "3", - "num.partitions": "5", - "log_filename": "kafka_server_7.log", - "config_filename": "kafka_server_7.properties" - }, - { - "entity_id": "8", - "port": "9095", - "broker.id": "5", - "log.segment.bytes": "20480", - "log.dir": "/tmp/kafka_server_8_logs", - "default.replication.factor": "3", - "num.partitions": "5", - "log_filename": "kafka_server_8.log", - "config_filename": "kafka_server_8.properties" - }, - { - "entity_id": "9", - "port": "9096", - "broker.id": "6", - "log.segment.bytes": "20480", - "log.dir": "/tmp/kafka_server_9_logs", - "default.replication.factor": "3", - "num.partitions": "5", - "log_filename": "kafka_server_9.log", - "config_filename": "kafka_server_9.properties" - }, - - { - "entity_id": "10", - "topic": "test_1", - "threads": "5", - "compression-codec": "0", - "message-size": "500", - "message": "100", - "request-num-acks": "-1", - "sync":"true", - "producer-num-retries":"5", - "log_filename": "producer_performance_10.log", - "config_filename": "producer_performance_10.properties" - }, - { - "entity_id": "11", - "topic": "test_1", - "group.id": "mytestgroup", - "consumer-timeout-ms": "10000", - "log_filename": "console_consumer_11.log", - "config_filename": "console_consumer_11.properties" - }, - - { - "entity_id": "12", - "log_filename": "mirror_maker_12.log", - "mirror_consumer_config_filename": "mirror_consumer_12.properties", - "mirror_producer_config_filename": "mirror_producer_12.properties" - }, - - { - "entity_id": "13", - "topic": "test_1", - "group.id": "mytestgroup", - "consumer-timeout-ms": "10000", - "log_filename": "console_consumer_13.log", - "config_filename": "console_consumer_13.properties" - } - ] -} diff --git a/system_test/mirror_maker_testsuite/testcase_15003/testcase_15003_properties.json b/system_test/mirror_maker_testsuite/testcase_15003/testcase_15003_properties.json new file mode 100644 index 
0000000..842c70e --- /dev/null +++ b/system_test/mirror_maker_testsuite/testcase_15003/testcase_15003_properties.json @@ -0,0 +1,156 @@ +{ + "description": {"01":"Replication with Mirror Maker => Bounce Mirror Maker", + "02":"Set up 2 clusters such as : SOURCE => MirrorMaker => TARGET", + "03":"Set up 2-node Zk cluster for both SOURCE & TARGET", + "04":"Produce and consume messages to a single topic - single partition.", + "05":"This test sends messages to 3 replicas", + "06":"At the end it verifies the log size and contents", + "07":"Use a consumer to verify no message loss in TARGET cluster.", + "08":"Producer dimensions : mode:async, acks:-1, comp:1", + "09":"Log segment size : 20480" + }, + "testcase_args": { + "bounce_leader": "false", + "bounce_mirror_maker": "true", + "bounced_entity_downtime_sec": "30", + "replica_factor": "3", + "num_partition": "1", + "num_iteration": "1", + "sleep_seconds_between_producer_calls": "1", + "message_producing_free_time_sec": "15", + "num_messages_to_produce_per_producer_call": "50" + }, + "entities": [ + { + "entity_id": "0", + "clientPort": "2108", + "dataDir": "/tmp/zookeeper_0", + "log_filename": "zookeeper_0.log", + "config_filename": "zookeeper_0.properties" + }, + { + "entity_id": "1", + "clientPort": "2118", + "dataDir": "/tmp/zookeeper_1", + "log_filename": "zookeeper_1.log", + "config_filename": "zookeeper_1.properties" + }, + + { + "entity_id": "2", + "clientPort": "2128", + "dataDir": "/tmp/zookeeper_2", + "log_filename": "zookeeper_2.log", + "config_filename": "zookeeper_2.properties" + }, + { + "entity_id": "3", + "clientPort": "2138", + "dataDir": "/tmp/zookeeper_3", + "log_filename": "zookeeper_3.log", + "config_filename": "zookeeper_3.properties" + }, + + { + "entity_id": "4", + "port": "9091", + "broker.id": "1", + "log.segment.bytes": "20480", + "log.dir": "/tmp/kafka_server_4_logs", + "default.replication.factor": "3", + "num.partitions": "5", + "log_filename": "kafka_server_4.log", + "config_filename": 
"kafka_server_4.properties" + }, + { + "entity_id": "5", + "port": "9092", + "broker.id": "2", + "log.segment.bytes": "20480", + "log.dir": "/tmp/kafka_server_5_logs", + "default.replication.factor": "3", + "num.partitions": "5", + "log_filename": "kafka_server_5.log", + "config_filename": "kafka_server_5.properties" + }, + { + "entity_id": "6", + "port": "9093", + "broker.id": "3", + "log.segment.bytes": "20480", + "log.dir": "/tmp/kafka_server_6_logs", + "default.replication.factor": "3", + "num.partitions": "5", + "log_filename": "kafka_server_6.log", + "config_filename": "kafka_server_6.properties" + }, + { + "entity_id": "7", + "port": "9094", + "broker.id": "4", + "log.segment.bytes": "20480", + "log.dir": "/tmp/kafka_server_7_logs", + "default.replication.factor": "3", + "num.partitions": "5", + "log_filename": "kafka_server_7.log", + "config_filename": "kafka_server_7.properties" + }, + { + "entity_id": "8", + "port": "9095", + "broker.id": "5", + "log.segment.bytes": "20480", + "log.dir": "/tmp/kafka_server_8_logs", + "default.replication.factor": "3", + "num.partitions": "5", + "log_filename": "kafka_server_8.log", + "config_filename": "kafka_server_8.properties" + }, + { + "entity_id": "9", + "port": "9096", + "broker.id": "6", + "log.segment.bytes": "20480", + "log.dir": "/tmp/kafka_server_9_logs", + "default.replication.factor": "3", + "num.partitions": "5", + "log_filename": "kafka_server_9.log", + "config_filename": "kafka_server_9.properties" + }, + + { + "entity_id": "10", + "topic": "test_1", + "threads": "5", + "compression-codec": "2", + "message-size": "500", + "message": "100", + "request-num-acks": "-1", + "sync":"false", + "producer-num-retries":"5", + "log_filename": "producer_performance_10.log", + "config_filename": "producer_performance_10.properties" + }, + { + "entity_id": "11", + "topic": "test_1", + "group.id": "mytestgroup", + "consumer-timeout-ms": "10000", + "log_filename": "console_consumer_11.log", + "config_filename": 
"console_consumer_11.properties" + }, + + { + "entity_id": "12", + "log_filename": "mirror_maker_12.log", + "mirror_consumer_config_filename": "mirror_consumer_12.properties", + "mirror_producer_config_filename": "mirror_producer_12.properties" + }, + { + "entity_id": "13", + "log_filename": "mirror_maker_13.log", + "mirror_consumer_config_filename": "mirror_consumer_13.properties", + "mirror_producer_config_filename": "mirror_producer_13.properties" + } + ] +} diff --git a/system_test/mirror_maker_testsuite/testcase_15003/testcase_5003_properties.json b/system_test/mirror_maker_testsuite/testcase_15003/testcase_5003_properties.json deleted file mode 100644 index 842c70e..0000000 --- a/system_test/mirror_maker_testsuite/testcase_15003/testcase_5003_properties.json +++ /dev/null @@ -1,156 +0,0 @@ -{ - "description": {"01":"Replication with Mirror Maker => Bounce Mirror Maker", - "02":"Set up 2 clusters such as : SOURCE => MirrorMaker => TARGET", - "03":"Set up 2-node Zk cluster for both SOURCE & TARGET", - "04":"Produce and consume messages to a single topic - single partition.", - "05":"This test sends messages to 3 replicas", - "06":"At the end it verifies the log size and contents", - "07":"Use a consumer to verify no message loss in TARGET cluster.", - "08":"Producer dimensions : mode:async, acks:-1, comp:1", - "09":"Log segment size : 20480" - }, - "testcase_args": { - "bounce_leader": "false", - "bounce_mirror_maker": "true", - "bounced_entity_downtime_sec": "30", - "replica_factor": "3", - "num_partition": "1", - "num_iteration": "1", - "sleep_seconds_between_producer_calls": "1", - "message_producing_free_time_sec": "15", - "num_messages_to_produce_per_producer_call": "50" - }, - "entities": [ - { - "entity_id": "0", - "clientPort": "2108", - "dataDir": "/tmp/zookeeper_0", - "log_filename": "zookeeper_0.log", - "config_filename": "zookeeper_0.properties" - }, - { - "entity_id": "1", - "clientPort": "2118", - "dataDir": "/tmp/zookeeper_1", - "log_filename": 
"zookeeper_1.log", - "config_filename": "zookeeper_1.properties" - }, - - { - "entity_id": "2", - "clientPort": "2128", - "dataDir": "/tmp/zookeeper_2", - "log_filename": "zookeeper_2.log", - "config_filename": "zookeeper_2.properties" - }, - { - "entity_id": "3", - "clientPort": "2138", - "dataDir": "/tmp/zookeeper_3", - "log_filename": "zookeeper_3.log", - "config_filename": "zookeeper_3.properties" - }, - - { - "entity_id": "4", - "port": "9091", - "broker.id": "1", - "log.segment.bytes": "20480", - "log.dir": "/tmp/kafka_server_4_logs", - "default.replication.factor": "3", - "num.partitions": "5", - "log_filename": "kafka_server_4.log", - "config_filename": "kafka_server_4.properties" - }, - { - "entity_id": "5", - "port": "9092", - "broker.id": "2", - "log.segment.bytes": "20480", - "log.dir": "/tmp/kafka_server_5_logs", - "default.replication.factor": "3", - "num.partitions": "5", - "log_filename": "kafka_server_5.log", - "config_filename": "kafka_server_5.properties" - }, - { - "entity_id": "6", - "port": "9093", - "broker.id": "3", - "log.segment.bytes": "20480", - "log.dir": "/tmp/kafka_server_6_logs", - "default.replication.factor": "3", - "num.partitions": "5", - "log_filename": "kafka_server_6.log", - "config_filename": "kafka_server_6.properties" - }, - { - "entity_id": "7", - "port": "9094", - "broker.id": "4", - "log.segment.bytes": "20480", - "log.dir": "/tmp/kafka_server_7_logs", - "default.replication.factor": "3", - "num.partitions": "5", - "log_filename": "kafka_server_7.log", - "config_filename": "kafka_server_7.properties" - }, - { - "entity_id": "8", - "port": "9095", - "broker.id": "5", - "log.segment.bytes": "20480", - "log.dir": "/tmp/kafka_server_8_logs", - "default.replication.factor": "3", - "num.partitions": "5", - "log_filename": "kafka_server_8.log", - "config_filename": "kafka_server_8.properties" - }, - { - "entity_id": "9", - "port": "9096", - "broker.id": "6", - "log.segment.bytes": "20480", - "log.dir": 
"/tmp/kafka_server_9_logs", - "default.replication.factor": "3", - "num.partitions": "5", - "log_filename": "kafka_server_9.log", - "config_filename": "kafka_server_9.properties" - }, - - { - "entity_id": "10", - "topic": "test_1", - "threads": "5", - "compression-codec": "2", - "message-size": "500", - "message": "100", - "request-num-acks": "-1", - "sync":"false", - "producer-num-retries":"5", - "log_filename": "producer_performance_10.log", - "config_filename": "producer_performance_10.properties" - }, - { - "entity_id": "11", - "topic": "test_1", - "group.id": "mytestgroup", - "consumer-timeout-ms": "10000", - "log_filename": "console_consumer_11.log", - "config_filename": "console_consumer_11.properties" - }, - - { - "entity_id": "12", - "log_filename": "mirror_maker_12.log", - "mirror_consumer_config_filename": "mirror_consumer_12.properties", - "mirror_producer_config_filename": "mirror_producer_12.properties" - }, - { - "entity_id": "13", - "log_filename": "mirror_maker_13.log", - "mirror_consumer_config_filename": "mirror_consumer_13.properties", - "mirror_producer_config_filename": "mirror_producer_13.properties" - } - ] -} diff --git a/system_test/mirror_maker_testsuite/testcase_15004/testcase_15004_properties.json b/system_test/mirror_maker_testsuite/testcase_15004/testcase_15004_properties.json new file mode 100644 index 0000000..48864e6 --- /dev/null +++ b/system_test/mirror_maker_testsuite/testcase_15004/testcase_15004_properties.json @@ -0,0 +1,156 @@ +{ + "description": {"01":"Replication with Mirror Maker => Bounce Mirror Maker", + "02":"Set up 2 clusters such as : SOURCE => MirrorMaker => TARGET", + "03":"Set up 2-node Zk cluster for both SOURCE & TARGET", + "04":"Produce and consume messages to a single topic - single partition.", + "05":"This test sends messages to 3 replicas", + "06":"At the end it verifies the log size and contents", + "07":"Use a consumer to verify no message loss in TARGET cluster.", + "08":"Producer dimensions : 
mode:async, acks:1, comp:1", + "09":"Log segment size : 20480" + }, + "testcase_args": { + "bounce_leader": "false", + "bounce_mirror_maker": "true", + "bounced_entity_downtime_sec": "30", + "replica_factor": "3", + "num_partition": "1", + "num_iteration": "1", + "sleep_seconds_between_producer_calls": "1", + "message_producing_free_time_sec": "15", + "num_messages_to_produce_per_producer_call": "50" + }, + "entities": [ + { + "entity_id": "0", + "clientPort": "2108", + "dataDir": "/tmp/zookeeper_0", + "log_filename": "zookeeper_0.log", + "config_filename": "zookeeper_0.properties" + }, + { + "entity_id": "1", + "clientPort": "2118", + "dataDir": "/tmp/zookeeper_1", + "log_filename": "zookeeper_1.log", + "config_filename": "zookeeper_1.properties" + }, + + { + "entity_id": "2", + "clientPort": "2128", + "dataDir": "/tmp/zookeeper_2", + "log_filename": "zookeeper_2.log", + "config_filename": "zookeeper_2.properties" + }, + { + "entity_id": "3", + "clientPort": "2138", + "dataDir": "/tmp/zookeeper_3", + "log_filename": "zookeeper_3.log", + "config_filename": "zookeeper_3.properties" + }, + + { + "entity_id": "4", + "port": "9091", + "broker.id": "1", + "log.segment.bytes": "20480", + "log.dir": "/tmp/kafka_server_4_logs", + "default.replication.factor": "3", + "num.partitions": "5", + "log_filename": "kafka_server_4.log", + "config_filename": "kafka_server_4.properties" + }, + { + "entity_id": "5", + "port": "9092", + "broker.id": "2", + "log.segment.bytes": "20480", + "log.dir": "/tmp/kafka_server_5_logs", + "default.replication.factor": "3", + "num.partitions": "5", + "log_filename": "kafka_server_5.log", + "config_filename": "kafka_server_5.properties" + }, + { + "entity_id": "6", + "port": "9093", + "broker.id": "3", + "log.segment.bytes": "20480", + "log.dir": "/tmp/kafka_server_6_logs", + "default.replication.factor": "3", + "num.partitions": "5", + "log_filename": "kafka_server_6.log", + "config_filename": "kafka_server_6.properties" + }, + { + "entity_id": 
"7", + "port": "9094", + "broker.id": "4", + "log.segment.bytes": "20480", + "log.dir": "/tmp/kafka_server_7_logs", + "default.replication.factor": "3", + "num.partitions": "5", + "log_filename": "kafka_server_7.log", + "config_filename": "kafka_server_7.properties" + }, + { + "entity_id": "8", + "port": "9095", + "broker.id": "5", + "log.segment.bytes": "20480", + "log.dir": "/tmp/kafka_server_8_logs", + "default.replication.factor": "3", + "num.partitions": "5", + "log_filename": "kafka_server_8.log", + "config_filename": "kafka_server_8.properties" + }, + { + "entity_id": "9", + "port": "9096", + "broker.id": "6", + "log.segment.bytes": "20480", + "log.dir": "/tmp/kafka_server_9_logs", + "default.replication.factor": "3", + "num.partitions": "5", + "log_filename": "kafka_server_9.log", + "config_filename": "kafka_server_9.properties" + }, + + { + "entity_id": "10", + "topic": "test_1", + "threads": "5", + "compression-codec": "1", + "message-size": "500", + "message": "100", + "request-num-acks": "1", + "sync":"false", + "producer-num-retries":"5", + "log_filename": "producer_performance_10.log", + "config_filename": "producer_performance_10.properties" + }, + { + "entity_id": "11", + "topic": "test_1", + "group.id": "mytestgroup", + "consumer-timeout-ms": "10000", + "log_filename": "console_consumer_11.log", + "config_filename": "console_consumer_11.properties" + }, + + { + "entity_id": "12", + "log_filename": "mirror_maker_12.log", + "mirror_consumer_config_filename": "mirror_consumer_12.properties", + "mirror_producer_config_filename": "mirror_producer_12.properties" + }, + { + "entity_id": "13", + "log_filename": "mirror_maker_13.log", + "mirror_consumer_config_filename": "mirror_consumer_13.properties", + "mirror_producer_config_filename": "mirror_producer_13.properties" + } + ] +} diff --git a/system_test/mirror_maker_testsuite/testcase_15004/testcase_5004_properties.json b/system_test/mirror_maker_testsuite/testcase_15004/testcase_5004_properties.json 
deleted file mode 100644 index 48864e6..0000000 --- a/system_test/mirror_maker_testsuite/testcase_15004/testcase_5004_properties.json +++ /dev/null @@ -1,156 +0,0 @@ -{ - "description": {"01":"Replication with Mirror Maker => Bounce Mirror Maker", - "02":"Set up 2 clusters such as : SOURCE => MirrorMaker => TARGET", - "03":"Set up 2-node Zk cluster for both SOURCE & TARGET", - "04":"Produce and consume messages to a single topic - single partition.", - "05":"This test sends messages to 3 replicas", - "06":"At the end it verifies the log size and contents", - "07":"Use a consumer to verify no message loss in TARGET cluster.", - "08":"Producer dimensions : mode:async, acks:1, comp:1", - "09":"Log segment size : 20480" - }, - "testcase_args": { - "bounce_leader": "false", - "bounce_mirror_maker": "true", - "bounced_entity_downtime_sec": "30", - "replica_factor": "3", - "num_partition": "1", - "num_iteration": "1", - "sleep_seconds_between_producer_calls": "1", - "message_producing_free_time_sec": "15", - "num_messages_to_produce_per_producer_call": "50" - }, - "entities": [ - { - "entity_id": "0", - "clientPort": "2108", - "dataDir": "/tmp/zookeeper_0", - "log_filename": "zookeeper_0.log", - "config_filename": "zookeeper_0.properties" - }, - { - "entity_id": "1", - "clientPort": "2118", - "dataDir": "/tmp/zookeeper_1", - "log_filename": "zookeeper_1.log", - "config_filename": "zookeeper_1.properties" - }, - - { - "entity_id": "2", - "clientPort": "2128", - "dataDir": "/tmp/zookeeper_2", - "log_filename": "zookeeper_2.log", - "config_filename": "zookeeper_2.properties" - }, - { - "entity_id": "3", - "clientPort": "2138", - "dataDir": "/tmp/zookeeper_3", - "log_filename": "zookeeper_3.log", - "config_filename": "zookeeper_3.properties" - }, - - { - "entity_id": "4", - "port": "9091", - "broker.id": "1", - "log.segment.bytes": "20480", - "log.dir": "/tmp/kafka_server_4_logs", - "default.replication.factor": "3", - "num.partitions": "5", - "log_filename": 
"kafka_server_4.log", - "config_filename": "kafka_server_4.properties" - }, - { - "entity_id": "5", - "port": "9092", - "broker.id": "2", - "log.segment.bytes": "20480", - "log.dir": "/tmp/kafka_server_5_logs", - "default.replication.factor": "3", - "num.partitions": "5", - "log_filename": "kafka_server_5.log", - "config_filename": "kafka_server_5.properties" - }, - { - "entity_id": "6", - "port": "9093", - "broker.id": "3", - "log.segment.bytes": "20480", - "log.dir": "/tmp/kafka_server_6_logs", - "default.replication.factor": "3", - "num.partitions": "5", - "log_filename": "kafka_server_6.log", - "config_filename": "kafka_server_6.properties" - }, - { - "entity_id": "7", - "port": "9094", - "broker.id": "4", - "log.segment.bytes": "20480", - "log.dir": "/tmp/kafka_server_7_logs", - "default.replication.factor": "3", - "num.partitions": "5", - "log_filename": "kafka_server_7.log", - "config_filename": "kafka_server_7.properties" - }, - { - "entity_id": "8", - "port": "9095", - "broker.id": "5", - "log.segment.bytes": "20480", - "log.dir": "/tmp/kafka_server_8_logs", - "default.replication.factor": "3", - "num.partitions": "5", - "log_filename": "kafka_server_8.log", - "config_filename": "kafka_server_8.properties" - }, - { - "entity_id": "9", - "port": "9096", - "broker.id": "6", - "log.segment.bytes": "20480", - "log.dir": "/tmp/kafka_server_9_logs", - "default.replication.factor": "3", - "num.partitions": "5", - "log_filename": "kafka_server_9.log", - "config_filename": "kafka_server_9.properties" - }, - - { - "entity_id": "10", - "topic": "test_1", - "threads": "5", - "compression-codec": "1", - "message-size": "500", - "message": "100", - "request-num-acks": "1", - "sync":"false", - "producer-num-retries":"5", - "log_filename": "producer_performance_10.log", - "config_filename": "producer_performance_10.properties" - }, - { - "entity_id": "11", - "topic": "test_1", - "group.id": "mytestgroup", - "consumer-timeout-ms": "10000", - "log_filename": 
"console_consumer_11.log", - "config_filename": "console_consumer_11.properties" - }, - - { - "entity_id": "12", - "log_filename": "mirror_maker_12.log", - "mirror_consumer_config_filename": "mirror_consumer_12.properties", - "mirror_producer_config_filename": "mirror_producer_12.properties" - }, - { - "entity_id": "13", - "log_filename": "mirror_maker_13.log", - "mirror_consumer_config_filename": "mirror_consumer_13.properties", - "mirror_producer_config_filename": "mirror_producer_13.properties" - } - ] -} diff --git a/system_test/mirror_maker_testsuite/testcase_15005/testcase_15005_properties.json b/system_test/mirror_maker_testsuite/testcase_15005/testcase_15005_properties.json new file mode 100644 index 0000000..92b2a6b --- /dev/null +++ b/system_test/mirror_maker_testsuite/testcase_15005/testcase_15005_properties.json @@ -0,0 +1,178 @@ +{ + "description": {"01":"Replication with Mirror Maker => Bounce Mirror Maker", + "02":"Set up 2 clusters such as : SOURCE => MirrorMaker => TARGET", + "03":"Set up 2-node Zk cluster for both SOURCE & TARGET", + "04":"Produce and consume messages to 2 topics - 2 partitions.", + "05":"This test sends messages to 3 replicas", + "06":"At the end it verifies the log size and contents", + "07":"Use a consumer to verify no message loss in TARGET cluster.", + "08":"Producer dimensions : mode:async, acks:-1, comp:1", + "09":"Log segment size : 20480" + }, + "testcase_args": { + "bounce_leader": "false", + "bounce_mirror_maker": "true", + "bounced_entity_downtime_sec": "30", + "replica_factor": "3", + "num_partition": "2", + "num_iteration": "1", + "sleep_seconds_between_producer_calls": "1", + "message_producing_free_time_sec": "15", + "num_messages_to_produce_per_producer_call": "50" + }, + "entities": [ + { + "entity_id": "0", + "clientPort": "2108", + "dataDir": "/tmp/zookeeper_0", + "log_filename": "zookeeper_0.log", + "config_filename": "zookeeper_0.properties" + }, + { + "entity_id": "1", + "clientPort": "2118", + "dataDir": 
"/tmp/zookeeper_1", + "log_filename": "zookeeper_1.log", + "config_filename": "zookeeper_1.properties" + }, + + { + "entity_id": "2", + "clientPort": "2128", + "dataDir": "/tmp/zookeeper_2", + "log_filename": "zookeeper_2.log", + "config_filename": "zookeeper_2.properties" + }, + { + "entity_id": "3", + "clientPort": "2138", + "dataDir": "/tmp/zookeeper_3", + "log_filename": "zookeeper_3.log", + "config_filename": "zookeeper_3.properties" + }, + + { + "entity_id": "4", + "port": "9091", + "broker.id": "1", + "log.segment.bytes": "20480", + "log.dir": "/tmp/kafka_server_4_logs", + "default.replication.factor": "3", + "num.partitions": "5", + "log_filename": "kafka_server_4.log", + "config_filename": "kafka_server_4.properties" + }, + { + "entity_id": "5", + "port": "9092", + "broker.id": "2", + "log.segment.bytes": "20480", + "log.dir": "/tmp/kafka_server_5_logs", + "default.replication.factor": "3", + "num.partitions": "5", + "log_filename": "kafka_server_5.log", + "config_filename": "kafka_server_5.properties" + }, + { + "entity_id": "6", + "port": "9093", + "broker.id": "3", + "log.segment.bytes": "20480", + "log.dir": "/tmp/kafka_server_6_logs", + "default.replication.factor": "3", + "num.partitions": "5", + "log_filename": "kafka_server_6.log", + "config_filename": "kafka_server_6.properties" + }, + { + "entity_id": "7", + "port": "9094", + "broker.id": "4", + "log.segment.bytes": "20480", + "log.dir": "/tmp/kafka_server_7_logs", + "default.replication.factor": "3", + "num.partitions": "5", + "log_filename": "kafka_server_7.log", + "config_filename": "kafka_server_7.properties" + }, + { + "entity_id": "8", + "port": "9095", + "broker.id": "5", + "log.segment.bytes": "20480", + "log.dir": "/tmp/kafka_server_8_logs", + "default.replication.factor": "3", + "num.partitions": "5", + "log_filename": "kafka_server_8.log", + "config_filename": "kafka_server_8.properties" + }, + { + "entity_id": "9", + "port": "9096", + "broker.id": "6", + "log.segment.bytes": "20480", 
+ "log.dir": "/tmp/kafka_server_9_logs", + "default.replication.factor": "3", + "num.partitions": "5", + "log_filename": "kafka_server_9.log", + "config_filename": "kafka_server_9.properties" + }, + + { + "entity_id": "10", + "topic": "test_1", + "threads": "5", + "compression-codec": "1", + "message-size": "500", + "message": "100", + "request-num-acks": "-1", + "sync":"false", + "producer-num-retries":"5", + "log_filename": "producer_performance_10.log", + "config_filename": "producer_performance_10.properties" + }, + { + "entity_id": "11", + "topic": "test_2", + "threads": "5", + "compression-codec": "1", + "message-size": "500", + "message": "100", + "request-num-acks": "-1", + "sync":"false", + "producer-num-retries":"5", + "log_filename": "producer_performance_11.log", + "config_filename": "producer_performance_11.properties" + }, + + { + "entity_id": "12", + "topic": "test_1", + "group.id": "mytestgroup", + "consumer-timeout-ms": "10000", + "log_filename": "console_consumer_12.log", + "config_filename": "console_consumer_12.properties" + }, + { + "entity_id": "13", + "topic": "test_2", + "group.id": "mytestgroup", + "consumer-timeout-ms": "10000", + "log_filename": "console_consumer_13.log", + "config_filename": "console_consumer_13.properties" + }, + + { + "entity_id": "14", + "log_filename": "mirror_maker_14.log", + "mirror_consumer_config_filename": "mirror_consumer_14.properties", + "mirror_producer_config_filename": "mirror_producer_14.properties" + }, + { + "entity_id": "15", + "log_filename": "mirror_maker_15.log", + "mirror_consumer_config_filename": "mirror_consumer_15.properties", + "mirror_producer_config_filename": "mirror_producer_15.properties" + } + ] +} diff --git a/system_test/mirror_maker_testsuite/testcase_15005/testcase_5005_properties.json b/system_test/mirror_maker_testsuite/testcase_15005/testcase_5005_properties.json deleted file mode 100644 index 92b2a6b..0000000 --- 
a/system_test/mirror_maker_testsuite/testcase_15005/testcase_5005_properties.json +++ /dev/null @@ -1,178 +0,0 @@ -{ - "description": {"01":"Replication with Mirror Maker => Bounce Mirror Maker", - "02":"Set up 2 clusters such as : SOURCE => MirrorMaker => TARGET", - "03":"Set up 2-node Zk cluster for both SOURCE & TARGET", - "04":"Produce and consume messages to 2 topics - 2 partitions.", - "05":"This test sends messages to 3 replicas", - "06":"At the end it verifies the log size and contents", - "07":"Use a consumer to verify no message loss in TARGET cluster.", - "08":"Producer dimensions : mode:async, acks:-1, comp:1", - "09":"Log segment size : 20480" - }, - "testcase_args": { - "bounce_leader": "false", - "bounce_mirror_maker": "true", - "bounced_entity_downtime_sec": "30", - "replica_factor": "3", - "num_partition": "2", - "num_iteration": "1", - "sleep_seconds_between_producer_calls": "1", - "message_producing_free_time_sec": "15", - "num_messages_to_produce_per_producer_call": "50" - }, - "entities": [ - { - "entity_id": "0", - "clientPort": "2108", - "dataDir": "/tmp/zookeeper_0", - "log_filename": "zookeeper_0.log", - "config_filename": "zookeeper_0.properties" - }, - { - "entity_id": "1", - "clientPort": "2118", - "dataDir": "/tmp/zookeeper_1", - "log_filename": "zookeeper_1.log", - "config_filename": "zookeeper_1.properties" - }, - - { - "entity_id": "2", - "clientPort": "2128", - "dataDir": "/tmp/zookeeper_2", - "log_filename": "zookeeper_2.log", - "config_filename": "zookeeper_2.properties" - }, - { - "entity_id": "3", - "clientPort": "2138", - "dataDir": "/tmp/zookeeper_3", - "log_filename": "zookeeper_3.log", - "config_filename": "zookeeper_3.properties" - }, - - { - "entity_id": "4", - "port": "9091", - "broker.id": "1", - "log.segment.bytes": "20480", - "log.dir": "/tmp/kafka_server_4_logs", - "default.replication.factor": "3", - "num.partitions": "5", - "log_filename": "kafka_server_4.log", - "config_filename": "kafka_server_4.properties" - }, - 
{ - "entity_id": "5", - "port": "9092", - "broker.id": "2", - "log.segment.bytes": "20480", - "log.dir": "/tmp/kafka_server_5_logs", - "default.replication.factor": "3", - "num.partitions": "5", - "log_filename": "kafka_server_5.log", - "config_filename": "kafka_server_5.properties" - }, - { - "entity_id": "6", - "port": "9093", - "broker.id": "3", - "log.segment.bytes": "20480", - "log.dir": "/tmp/kafka_server_6_logs", - "default.replication.factor": "3", - "num.partitions": "5", - "log_filename": "kafka_server_6.log", - "config_filename": "kafka_server_6.properties" - }, - { - "entity_id": "7", - "port": "9094", - "broker.id": "4", - "log.segment.bytes": "20480", - "log.dir": "/tmp/kafka_server_7_logs", - "default.replication.factor": "3", - "num.partitions": "5", - "log_filename": "kafka_server_7.log", - "config_filename": "kafka_server_7.properties" - }, - { - "entity_id": "8", - "port": "9095", - "broker.id": "5", - "log.segment.bytes": "20480", - "log.dir": "/tmp/kafka_server_8_logs", - "default.replication.factor": "3", - "num.partitions": "5", - "log_filename": "kafka_server_8.log", - "config_filename": "kafka_server_8.properties" - }, - { - "entity_id": "9", - "port": "9096", - "broker.id": "6", - "log.segment.bytes": "20480", - "log.dir": "/tmp/kafka_server_9_logs", - "default.replication.factor": "3", - "num.partitions": "5", - "log_filename": "kafka_server_9.log", - "config_filename": "kafka_server_9.properties" - }, - - { - "entity_id": "10", - "topic": "test_1", - "threads": "5", - "compression-codec": "1", - "message-size": "500", - "message": "100", - "request-num-acks": "-1", - "sync":"false", - "producer-num-retries":"5", - "log_filename": "producer_performance_10.log", - "config_filename": "producer_performance_10.properties" - }, - { - "entity_id": "11", - "topic": "test_2", - "threads": "5", - "compression-codec": "1", - "message-size": "500", - "message": "100", - "request-num-acks": "-1", - "sync":"false", - "producer-num-retries":"5", - 
"log_filename": "producer_performance_11.log", - "config_filename": "producer_performance_11.properties" - }, - - { - "entity_id": "12", - "topic": "test_1", - "group.id": "mytestgroup", - "consumer-timeout-ms": "10000", - "log_filename": "console_consumer_12.log", - "config_filename": "console_consumer_12.properties" - }, - { - "entity_id": "13", - "topic": "test_2", - "group.id": "mytestgroup", - "consumer-timeout-ms": "10000", - "log_filename": "console_consumer_13.log", - "config_filename": "console_consumer_13.properties" - }, - - { - "entity_id": "14", - "log_filename": "mirror_maker_14.log", - "mirror_consumer_config_filename": "mirror_consumer_14.properties", - "mirror_producer_config_filename": "mirror_producer_14.properties" - }, - { - "entity_id": "15", - "log_filename": "mirror_maker_15.log", - "mirror_consumer_config_filename": "mirror_consumer_15.properties", - "mirror_producer_config_filename": "mirror_producer_15.properties" - } - ] -} diff --git a/system_test/mirror_maker_testsuite/testcase_15006/testcase_15006_properties.json b/system_test/mirror_maker_testsuite/testcase_15006/testcase_15006_properties.json new file mode 100644 index 0000000..7d5019c --- /dev/null +++ b/system_test/mirror_maker_testsuite/testcase_15006/testcase_15006_properties.json @@ -0,0 +1,178 @@ +{ + "description": {"01":"Replication with Mirror Maker => Bounce Mirror Maker", + "02":"Set up 2 clusters such as : SOURCE => MirrorMaker => TARGET", + "03":"Set up 2-node Zk cluster for both SOURCE & TARGET", + "04":"Produce and consume messages to 2 topics - 2 partitions.", + "05":"This test sends messages to 3 replicas", + "06":"At the end it verifies the log size and contents", + "07":"Use a consumer to verify no message loss in TARGET cluster.", + "08":"Producer dimensions : mode:async, acks:1, comp:1", + "09":"Log segment size : 20480" + }, + "testcase_args": { + "bounce_leader": "false", + "bounce_mirror_maker": "true", + "bounced_entity_downtime_sec": "30", + 
"replica_factor": "3", + "num_partition": "2", + "num_iteration": "1", + "sleep_seconds_between_producer_calls": "1", + "message_producing_free_time_sec": "15", + "num_messages_to_produce_per_producer_call": "50" + }, + "entities": [ + { + "entity_id": "0", + "clientPort": "2108", + "dataDir": "/tmp/zookeeper_0", + "log_filename": "zookeeper_0.log", + "config_filename": "zookeeper_0.properties" + }, + { + "entity_id": "1", + "clientPort": "2118", + "dataDir": "/tmp/zookeeper_1", + "log_filename": "zookeeper_1.log", + "config_filename": "zookeeper_1.properties" + }, + + { + "entity_id": "2", + "clientPort": "2128", + "dataDir": "/tmp/zookeeper_2", + "log_filename": "zookeeper_2.log", + "config_filename": "zookeeper_2.properties" + }, + { + "entity_id": "3", + "clientPort": "2138", + "dataDir": "/tmp/zookeeper_3", + "log_filename": "zookeeper_3.log", + "config_filename": "zookeeper_3.properties" + }, + + { + "entity_id": "4", + "port": "9091", + "broker.id": "1", + "log.segment.bytes": "20480", + "log.dir": "/tmp/kafka_server_4_logs", + "default.replication.factor": "3", + "num.partitions": "5", + "log_filename": "kafka_server_4.log", + "config_filename": "kafka_server_4.properties" + }, + { + "entity_id": "5", + "port": "9092", + "broker.id": "2", + "log.segment.bytes": "20480", + "log.dir": "/tmp/kafka_server_5_logs", + "default.replication.factor": "3", + "num.partitions": "5", + "log_filename": "kafka_server_5.log", + "config_filename": "kafka_server_5.properties" + }, + { + "entity_id": "6", + "port": "9093", + "broker.id": "3", + "log.segment.bytes": "20480", + "log.dir": "/tmp/kafka_server_6_logs", + "default.replication.factor": "3", + "num.partitions": "5", + "log_filename": "kafka_server_6.log", + "config_filename": "kafka_server_6.properties" + }, + { + "entity_id": "7", + "port": "9094", + "broker.id": "4", + "log.segment.bytes": "20480", + "log.dir": "/tmp/kafka_server_7_logs", + "default.replication.factor": "3", + "num.partitions": "5", + 
"log_filename": "kafka_server_7.log", + "config_filename": "kafka_server_7.properties" + }, + { + "entity_id": "8", + "port": "9095", + "broker.id": "5", + "log.segment.bytes": "20480", + "log.dir": "/tmp/kafka_server_8_logs", + "default.replication.factor": "3", + "num.partitions": "5", + "log_filename": "kafka_server_8.log", + "config_filename": "kafka_server_8.properties" + }, + { + "entity_id": "9", + "port": "9096", + "broker.id": "6", + "log.segment.bytes": "20480", + "log.dir": "/tmp/kafka_server_9_logs", + "default.replication.factor": "3", + "num.partitions": "5", + "log_filename": "kafka_server_9.log", + "config_filename": "kafka_server_9.properties" + }, + + { + "entity_id": "10", + "topic": "test_1", + "threads": "5", + "compression-codec": "1", + "message-size": "500", + "message": "100", + "request-num-acks": "1", + "sync":"false", + "producer-num-retries":"5", + "log_filename": "producer_performance_10.log", + "config_filename": "producer_performance_10.properties" + }, + { + "entity_id": "11", + "topic": "test_2", + "threads": "5", + "compression-codec": "1", + "message-size": "500", + "message": "100", + "request-num-acks": "1", + "sync":"false", + "producer-num-retries":"5", + "log_filename": "producer_performance_11.log", + "config_filename": "producer_performance_11.properties" + }, + + { + "entity_id": "12", + "topic": "test_1", + "group.id": "mytestgroup", + "consumer-timeout-ms": "10000", + "log_filename": "console_consumer_12.log", + "config_filename": "console_consumer_12.properties" + }, + { + "entity_id": "13", + "topic": "test_2", + "group.id": "mytestgroup", + "consumer-timeout-ms": "10000", + "log_filename": "console_consumer_13.log", + "config_filename": "console_consumer_13.properties" + }, + + { + "entity_id": "14", + "log_filename": "mirror_maker_14.log", + "mirror_consumer_config_filename": "mirror_consumer_14.properties", + "mirror_producer_config_filename": "mirror_producer_14.properties" + }, + { + "entity_id": "15", + 
"log_filename": "mirror_maker_15.log", + "mirror_consumer_config_filename": "mirror_consumer_15.properties", + "mirror_producer_config_filename": "mirror_producer_15.properties" + } + ] +} diff --git a/system_test/mirror_maker_testsuite/testcase_15006/testcase_5006_properties.json b/system_test/mirror_maker_testsuite/testcase_15006/testcase_5006_properties.json deleted file mode 100644 index 7d5019c..0000000 --- a/system_test/mirror_maker_testsuite/testcase_15006/testcase_5006_properties.json +++ /dev/null @@ -1,178 +0,0 @@ -{ - "description": {"01":"Replication with Mirror Maker => Bounce Mirror Maker", - "02":"Set up 2 clusters such as : SOURCE => MirrorMaker => TARGET", - "03":"Set up 2-node Zk cluster for both SOURCE & TARGET", - "04":"Produce and consume messages to 2 topics - 2 partitions.", - "05":"This test sends messages to 3 replicas", - "06":"At the end it verifies the log size and contents", - "07":"Use a consumer to verify no message loss in TARGET cluster.", - "08":"Producer dimensions : mode:async, acks:1, comp:1", - "09":"Log segment size : 20480" - }, - "testcase_args": { - "bounce_leader": "false", - "bounce_mirror_maker": "true", - "bounced_entity_downtime_sec": "30", - "replica_factor": "3", - "num_partition": "2", - "num_iteration": "1", - "sleep_seconds_between_producer_calls": "1", - "message_producing_free_time_sec": "15", - "num_messages_to_produce_per_producer_call": "50" - }, - "entities": [ - { - "entity_id": "0", - "clientPort": "2108", - "dataDir": "/tmp/zookeeper_0", - "log_filename": "zookeeper_0.log", - "config_filename": "zookeeper_0.properties" - }, - { - "entity_id": "1", - "clientPort": "2118", - "dataDir": "/tmp/zookeeper_1", - "log_filename": "zookeeper_1.log", - "config_filename": "zookeeper_1.properties" - }, - - { - "entity_id": "2", - "clientPort": "2128", - "dataDir": "/tmp/zookeeper_2", - "log_filename": "zookeeper_2.log", - "config_filename": "zookeeper_2.properties" - }, - { - "entity_id": "3", - "clientPort": "2138", 
- "dataDir": "/tmp/zookeeper_3", - "log_filename": "zookeeper_3.log", - "config_filename": "zookeeper_3.properties" - }, - - { - "entity_id": "4", - "port": "9091", - "broker.id": "1", - "log.segment.bytes": "20480", - "log.dir": "/tmp/kafka_server_4_logs", - "default.replication.factor": "3", - "num.partitions": "5", - "log_filename": "kafka_server_4.log", - "config_filename": "kafka_server_4.properties" - }, - { - "entity_id": "5", - "port": "9092", - "broker.id": "2", - "log.segment.bytes": "20480", - "log.dir": "/tmp/kafka_server_5_logs", - "default.replication.factor": "3", - "num.partitions": "5", - "log_filename": "kafka_server_5.log", - "config_filename": "kafka_server_5.properties" - }, - { - "entity_id": "6", - "port": "9093", - "broker.id": "3", - "log.segment.bytes": "20480", - "log.dir": "/tmp/kafka_server_6_logs", - "default.replication.factor": "3", - "num.partitions": "5", - "log_filename": "kafka_server_6.log", - "config_filename": "kafka_server_6.properties" - }, - { - "entity_id": "7", - "port": "9094", - "broker.id": "4", - "log.segment.bytes": "20480", - "log.dir": "/tmp/kafka_server_7_logs", - "default.replication.factor": "3", - "num.partitions": "5", - "log_filename": "kafka_server_7.log", - "config_filename": "kafka_server_7.properties" - }, - { - "entity_id": "8", - "port": "9095", - "broker.id": "5", - "log.segment.bytes": "20480", - "log.dir": "/tmp/kafka_server_8_logs", - "default.replication.factor": "3", - "num.partitions": "5", - "log_filename": "kafka_server_8.log", - "config_filename": "kafka_server_8.properties" - }, - { - "entity_id": "9", - "port": "9096", - "broker.id": "6", - "log.segment.bytes": "20480", - "log.dir": "/tmp/kafka_server_9_logs", - "default.replication.factor": "3", - "num.partitions": "5", - "log_filename": "kafka_server_9.log", - "config_filename": "kafka_server_9.properties" - }, - - { - "entity_id": "10", - "topic": "test_1", - "threads": "5", - "compression-codec": "1", - "message-size": "500", - 
"message": "100", - "request-num-acks": "1", - "sync":"false", - "producer-num-retries":"5", - "log_filename": "producer_performance_10.log", - "config_filename": "producer_performance_10.properties" - }, - { - "entity_id": "11", - "topic": "test_2", - "threads": "5", - "compression-codec": "1", - "message-size": "500", - "message": "100", - "request-num-acks": "1", - "sync":"false", - "producer-num-retries":"5", - "log_filename": "producer_performance_11.log", - "config_filename": "producer_performance_11.properties" - }, - - { - "entity_id": "12", - "topic": "test_1", - "group.id": "mytestgroup", - "consumer-timeout-ms": "10000", - "log_filename": "console_consumer_12.log", - "config_filename": "console_consumer_12.properties" - }, - { - "entity_id": "13", - "topic": "test_2", - "group.id": "mytestgroup", - "consumer-timeout-ms": "10000", - "log_filename": "console_consumer_13.log", - "config_filename": "console_consumer_13.properties" - }, - - { - "entity_id": "14", - "log_filename": "mirror_maker_14.log", - "mirror_consumer_config_filename": "mirror_consumer_14.properties", - "mirror_producer_config_filename": "mirror_producer_14.properties" - }, - { - "entity_id": "15", - "log_filename": "mirror_maker_15.log", - "mirror_consumer_config_filename": "mirror_consumer_15.properties", - "mirror_producer_config_filename": "mirror_producer_15.properties" - } - ] -} diff --git a/system_test/replication_testsuite/testcase_0001/testcase_0001_properties.json b/system_test/replication_testsuite/testcase_0001/testcase_0001_properties.json index 308f193..250ffe0 100644 --- a/system_test/replication_testsuite/testcase_0001/testcase_0001_properties.json +++ b/system_test/replication_testsuite/testcase_0001/testcase_0001_properties.json @@ -5,7 +5,7 @@ "04":"At the end it verifies the log size and contents", "05":"Use a consumer to verify no message loss.", "06":"Producer dimensions : mode:sync, acks:-1, comp:0", - "07":"Log segment size : 20480 + "07":"Log segment size : 
20480" }, "testcase_args": { "broker_type": "leader", diff --git a/system_test/replication_testsuite/testcase_10101/testcase_0101_properties.json b/system_test/replication_testsuite/testcase_10101/testcase_0101_properties.json deleted file mode 100644 index 3f8e587..0000000 --- a/system_test/replication_testsuite/testcase_10101/testcase_0101_properties.json +++ /dev/null @@ -1,86 +0,0 @@ -{ - "description": {"01":"Leader Failure in Replication : Base Test", - "02":"Produce and consume messages to a single topic - single partition.", - "03":"This test sends messages to 3 replicas", - "04":"To trigger leader election: find the leader and terminate by controlled failure (kill -15)", - "05":"Restart the terminated broker", - "06":"Lookup brokers' log4j messages and verify that leader is re-elected successfully", - "07":"At the end it verifies the log size and contents", - "08":"Use a consumer to verify no message loss.", - "09":"Producer dimensions : mode:sync, acks:-1, comp:0", - "10":"Log segment size : 20480" - }, - "testcase_args": { - "broker_type": "leader", - "bounce_broker": "true", - "replica_factor": "3", - "num_partition": "1", - "num_iteration": "1", - "sleep_seconds_between_producer_calls": "1", - "message_producing_free_time_sec": "15", - "num_messages_to_produce_per_producer_call": "50" - }, - "entities": [ - { - "entity_id": "0", - "clientPort": "2188", - "dataDir": "/tmp/zookeeper_0", - "log_filename": "zookeeper_2188.log", - "config_filename": "zookeeper_2188.properties" - }, - { - "entity_id": "1", - "port": "9091", - "broker.id": "1", - "log.segment.bytes": "20480", - "log.dir": "/tmp/kafka_server_1_logs", - "default.replication.factor": "3", - "num.partitions": "1", - "log_filename": "kafka_server_9091.log", - "config_filename": "kafka_server_9091.properties" - }, - { - "entity_id": "2", - "port": "9092", - "broker.id": "2", - "log.segment.bytes": "20480", - "log.dir": "/tmp/kafka_server_2_logs", - "default.replication.factor": "3", - 
"num.partitions": "1", - "log_filename": "kafka_server_9092.log", - "config_filename": "kafka_server_9092.properties" - }, - { - "entity_id": "3", - "port": "9093", - "broker.id": "3", - "log.segment.bytes": "20480", - "log.dir": "/tmp/kafka_server_3_logs", - "default.replication.factor": "3", - "num.partitions": "1", - "log_filename": "kafka_server_9093.log", - "config_filename": "kafka_server_9093.properties" - }, - { - "entity_id": "4", - "topic": "test_1", - "threads": "5", - "compression-codec": "0", - "message-size": "500", - "message": "100", - "request-num-acks": "-1", - "sync":"true", - "log_filename": "producer_performance.log", - "config_filename": "producer_performance.properties" - }, - { - "entity_id": "5", - "topic": "test_1", - "group.id": "mytestgroup", - "consumer-timeout-ms": "10000", - "zookeeper": "localhost:2188", - "log_filename": "console_consumer.log", - "config_filename": "console_consumer.properties" - } - ] -} diff --git a/system_test/replication_testsuite/testcase_10101/testcase_10101_properties.json b/system_test/replication_testsuite/testcase_10101/testcase_10101_properties.json new file mode 100644 index 0000000..3f8e587 --- /dev/null +++ b/system_test/replication_testsuite/testcase_10101/testcase_10101_properties.json @@ -0,0 +1,86 @@ +{ + "description": {"01":"Leader Failure in Replication : Base Test", + "02":"Produce and consume messages to a single topic - single partition.", + "03":"This test sends messages to 3 replicas", + "04":"To trigger leader election: find the leader and terminate by controlled failure (kill -15)", + "05":"Restart the terminated broker", + "06":"Lookup brokers' log4j messages and verify that leader is re-elected successfully", + "07":"At the end it verifies the log size and contents", + "08":"Use a consumer to verify no message loss.", + "09":"Producer dimensions : mode:sync, acks:-1, comp:0", + "10":"Log segment size : 20480" + }, + "testcase_args": { + "broker_type": "leader", + "bounce_broker": 
"true", + "replica_factor": "3", + "num_partition": "1", + "num_iteration": "1", + "sleep_seconds_between_producer_calls": "1", + "message_producing_free_time_sec": "15", + "num_messages_to_produce_per_producer_call": "50" + }, + "entities": [ + { + "entity_id": "0", + "clientPort": "2188", + "dataDir": "/tmp/zookeeper_0", + "log_filename": "zookeeper_2188.log", + "config_filename": "zookeeper_2188.properties" + }, + { + "entity_id": "1", + "port": "9091", + "broker.id": "1", + "log.segment.bytes": "20480", + "log.dir": "/tmp/kafka_server_1_logs", + "default.replication.factor": "3", + "num.partitions": "1", + "log_filename": "kafka_server_9091.log", + "config_filename": "kafka_server_9091.properties" + }, + { + "entity_id": "2", + "port": "9092", + "broker.id": "2", + "log.segment.bytes": "20480", + "log.dir": "/tmp/kafka_server_2_logs", + "default.replication.factor": "3", + "num.partitions": "1", + "log_filename": "kafka_server_9092.log", + "config_filename": "kafka_server_9092.properties" + }, + { + "entity_id": "3", + "port": "9093", + "broker.id": "3", + "log.segment.bytes": "20480", + "log.dir": "/tmp/kafka_server_3_logs", + "default.replication.factor": "3", + "num.partitions": "1", + "log_filename": "kafka_server_9093.log", + "config_filename": "kafka_server_9093.properties" + }, + { + "entity_id": "4", + "topic": "test_1", + "threads": "5", + "compression-codec": "0", + "message-size": "500", + "message": "100", + "request-num-acks": "-1", + "sync":"true", + "log_filename": "producer_performance.log", + "config_filename": "producer_performance.properties" + }, + { + "entity_id": "5", + "topic": "test_1", + "group.id": "mytestgroup", + "consumer-timeout-ms": "10000", + "zookeeper": "localhost:2188", + "log_filename": "console_consumer.log", + "config_filename": "console_consumer.properties" + } + ] +} diff --git a/system_test/replication_testsuite/testcase_10102/testcase_0102_properties.json 
b/system_test/replication_testsuite/testcase_10102/testcase_0102_properties.json deleted file mode 100644 index c96352d..0000000 --- a/system_test/replication_testsuite/testcase_10102/testcase_0102_properties.json +++ /dev/null @@ -1,86 +0,0 @@ -{ - "description": {"01":"Leader Failure in Replication : 1. comp => 1", - "02":"Produce and consume messages to a single topic - single partition.", - "03":"This test sends messages to 3 replicas", - "04":"To trigger leader election: find the leader and terminate by controlled failure (kill -15)", - "05":"Restart the terminated broker", - "06":"Lookup brokers' log4j messages and verify that leader is re-elected successfully", - "07":"At the end it verifies the log size and contents", - "08":"Use a consumer to verify no message loss.", - "09":"Producer dimensions : mode:sync, acks:-1, comp:1", - "10":"Log segment size : 20480" - }, - "testcase_args": { - "broker_type": "leader", - "bounce_broker": "true", - "replica_factor": "3", - "num_partition": "1", - "num_iteration": "1", - "sleep_seconds_between_producer_calls": "1", - "message_producing_free_time_sec": "15", - "num_messages_to_produce_per_producer_call": "50" - }, - "entities": [ - { - "entity_id": "0", - "clientPort": "2188", - "dataDir": "/tmp/zookeeper_0", - "log_filename": "zookeeper_2188.log", - "config_filename": "zookeeper_2188.properties" - }, - { - "entity_id": "1", - "port": "9091", - "broker.id": "1", - "log.segment.bytes": "20480", - "log.dir": "/tmp/kafka_server_1_logs", - "default.replication.factor": "3", - "num.partitions": "1", - "log_filename": "kafka_server_9091.log", - "config_filename": "kafka_server_9091.properties" - }, - { - "entity_id": "2", - "port": "9092", - "broker.id": "2", - "log.segment.bytes": "20480", - "log.dir": "/tmp/kafka_server_2_logs", - "default.replication.factor": "3", - "num.partitions": "1", - "log_filename": "kafka_server_9092.log", - "config_filename": "kafka_server_9092.properties" - }, - { - "entity_id": "3", - "port": 
"9093", - "broker.id": "3", - "log.segment.bytes": "20480", - "log.dir": "/tmp/kafka_server_3_logs", - "default.replication.factor": "3", - "num.partitions": "1", - "log_filename": "kafka_server_9093.log", - "config_filename": "kafka_server_9093.properties" - }, - { - "entity_id": "4", - "topic": "test_1", - "threads": "5", - "compression-codec": "1", - "message-size": "500", - "message": "100", - "request-num-acks": "-1", - "sync":"true", - "log_filename": "producer_performance.log", - "config_filename": "producer_performance.properties" - }, - { - "entity_id": "5", - "topic": "test_1", - "group.id": "mytestgroup", - "consumer-timeout-ms": "10000", - "zookeeper": "localhost:2188", - "log_filename": "console_consumer.log", - "config_filename": "console_consumer.properties" - } - ] -} diff --git a/system_test/replication_testsuite/testcase_10102/testcase_10102_properties.json b/system_test/replication_testsuite/testcase_10102/testcase_10102_properties.json new file mode 100644 index 0000000..c96352d --- /dev/null +++ b/system_test/replication_testsuite/testcase_10102/testcase_10102_properties.json @@ -0,0 +1,86 @@ +{ + "description": {"01":"Leader Failure in Replication : 1. 
comp => 1", + "02":"Produce and consume messages to a single topic - single partition.", + "03":"This test sends messages to 3 replicas", + "04":"To trigger leader election: find the leader and terminate by controlled failure (kill -15)", + "05":"Restart the terminated broker", + "06":"Lookup brokers' log4j messages and verify that leader is re-elected successfully", + "07":"At the end it verifies the log size and contents", + "08":"Use a consumer to verify no message loss.", + "09":"Producer dimensions : mode:sync, acks:-1, comp:1", + "10":"Log segment size : 20480" + }, + "testcase_args": { + "broker_type": "leader", + "bounce_broker": "true", + "replica_factor": "3", + "num_partition": "1", + "num_iteration": "1", + "sleep_seconds_between_producer_calls": "1", + "message_producing_free_time_sec": "15", + "num_messages_to_produce_per_producer_call": "50" + }, + "entities": [ + { + "entity_id": "0", + "clientPort": "2188", + "dataDir": "/tmp/zookeeper_0", + "log_filename": "zookeeper_2188.log", + "config_filename": "zookeeper_2188.properties" + }, + { + "entity_id": "1", + "port": "9091", + "broker.id": "1", + "log.segment.bytes": "20480", + "log.dir": "/tmp/kafka_server_1_logs", + "default.replication.factor": "3", + "num.partitions": "1", + "log_filename": "kafka_server_9091.log", + "config_filename": "kafka_server_9091.properties" + }, + { + "entity_id": "2", + "port": "9092", + "broker.id": "2", + "log.segment.bytes": "20480", + "log.dir": "/tmp/kafka_server_2_logs", + "default.replication.factor": "3", + "num.partitions": "1", + "log_filename": "kafka_server_9092.log", + "config_filename": "kafka_server_9092.properties" + }, + { + "entity_id": "3", + "port": "9093", + "broker.id": "3", + "log.segment.bytes": "20480", + "log.dir": "/tmp/kafka_server_3_logs", + "default.replication.factor": "3", + "num.partitions": "1", + "log_filename": "kafka_server_9093.log", + "config_filename": "kafka_server_9093.properties" + }, + { + "entity_id": "4", + "topic": 
"test_1", + "threads": "5", + "compression-codec": "1", + "message-size": "500", + "message": "100", + "request-num-acks": "-1", + "sync":"true", + "log_filename": "producer_performance.log", + "config_filename": "producer_performance.properties" + }, + { + "entity_id": "5", + "topic": "test_1", + "group.id": "mytestgroup", + "consumer-timeout-ms": "10000", + "zookeeper": "localhost:2188", + "log_filename": "console_consumer.log", + "config_filename": "console_consumer.properties" + } + ] +} diff --git a/system_test/replication_testsuite/testcase_10103/testcase_0103_properties.json b/system_test/replication_testsuite/testcase_10103/testcase_0103_properties.json deleted file mode 100644 index 55fa39e..0000000 --- a/system_test/replication_testsuite/testcase_10103/testcase_0103_properties.json +++ /dev/null @@ -1,86 +0,0 @@ -{ - "description": {"01":"Leader Failure in Replication : 1. acks => 1; 2. comp => 1", - "02":"Produce and consume messages to a single topic - single partition.", - "03":"This test sends messages to 3 replicas", - "04":"To trigger leader election: find the leader and terminate by controlled failure (kill -15)", - "05":"Restart the terminated broker", - "06":"Lookup brokers' log4j messages and verify that leader is re-elected successfully", - "07":"At the end it verifies the log size and contents", - "08":"Use a consumer to verify no message loss.", - "09":"Producer dimensions : mode:sync, acks:1, comp:1", - "10":"Log segment size : 20480" - }, - "testcase_args": { - "broker_type": "leader", - "bounce_broker": "true", - "replica_factor": "3", - "num_partition": "1", - "num_iteration": "1", - "sleep_seconds_between_producer_calls": "1", - "message_producing_free_time_sec": "15", - "num_messages_to_produce_per_producer_call": "50" - }, - "entities": [ - { - "entity_id": "0", - "clientPort": "2188", - "dataDir": "/tmp/zookeeper_0", - "log_filename": "zookeeper_2188.log", - "config_filename": "zookeeper_2188.properties" - }, - { - "entity_id": "1", - 
"port": "9091", - "broker.id": "1", - "log.segment.bytes": "20480", - "log.dir": "/tmp/kafka_server_1_logs", - "default.replication.factor": "3", - "num.partitions": "1", - "log_filename": "kafka_server_9091.log", - "config_filename": "kafka_server_9091.properties" - }, - { - "entity_id": "2", - "port": "9092", - "broker.id": "2", - "log.segment.bytes": "20480", - "log.dir": "/tmp/kafka_server_2_logs", - "default.replication.factor": "3", - "num.partitions": "1", - "log_filename": "kafka_server_9092.log", - "config_filename": "kafka_server_9092.properties" - }, - { - "entity_id": "3", - "port": "9093", - "broker.id": "3", - "log.segment.bytes": "20480", - "log.dir": "/tmp/kafka_server_3_logs", - "default.replication.factor": "3", - "num.partitions": "1", - "log_filename": "kafka_server_9093.log", - "config_filename": "kafka_server_9093.properties" - }, - { - "entity_id": "4", - "topic": "test_1", - "threads": "5", - "compression-codec": "1", - "message-size": "500", - "message": "100", - "request-num-acks": "1", - "sync":"true", - "log_filename": "producer_performance.log", - "config_filename": "producer_performance.properties" - }, - { - "entity_id": "5", - "topic": "test_1", - "group.id": "mytestgroup", - "consumer-timeout-ms": "10000", - "zookeeper": "localhost:2188", - "log_filename": "console_consumer.log", - "config_filename": "console_consumer.properties" - } - ] -} diff --git a/system_test/replication_testsuite/testcase_10103/testcase_10103_properties.json b/system_test/replication_testsuite/testcase_10103/testcase_10103_properties.json new file mode 100644 index 0000000..55fa39e --- /dev/null +++ b/system_test/replication_testsuite/testcase_10103/testcase_10103_properties.json @@ -0,0 +1,86 @@ +{ + "description": {"01":"Leader Failure in Replication : 1. acks => 1; 2. 
comp => 1", + "02":"Produce and consume messages to a single topic - single partition.", + "03":"This test sends messages to 3 replicas", + "04":"To trigger leader election: find the leader and terminate by controlled failure (kill -15)", + "05":"Restart the terminated broker", + "06":"Lookup brokers' log4j messages and verify that leader is re-elected successfully", + "07":"At the end it verifies the log size and contents", + "08":"Use a consumer to verify no message loss.", + "09":"Producer dimensions : mode:sync, acks:1, comp:1", + "10":"Log segment size : 20480" + }, + "testcase_args": { + "broker_type": "leader", + "bounce_broker": "true", + "replica_factor": "3", + "num_partition": "1", + "num_iteration": "1", + "sleep_seconds_between_producer_calls": "1", + "message_producing_free_time_sec": "15", + "num_messages_to_produce_per_producer_call": "50" + }, + "entities": [ + { + "entity_id": "0", + "clientPort": "2188", + "dataDir": "/tmp/zookeeper_0", + "log_filename": "zookeeper_2188.log", + "config_filename": "zookeeper_2188.properties" + }, + { + "entity_id": "1", + "port": "9091", + "broker.id": "1", + "log.segment.bytes": "20480", + "log.dir": "/tmp/kafka_server_1_logs", + "default.replication.factor": "3", + "num.partitions": "1", + "log_filename": "kafka_server_9091.log", + "config_filename": "kafka_server_9091.properties" + }, + { + "entity_id": "2", + "port": "9092", + "broker.id": "2", + "log.segment.bytes": "20480", + "log.dir": "/tmp/kafka_server_2_logs", + "default.replication.factor": "3", + "num.partitions": "1", + "log_filename": "kafka_server_9092.log", + "config_filename": "kafka_server_9092.properties" + }, + { + "entity_id": "3", + "port": "9093", + "broker.id": "3", + "log.segment.bytes": "20480", + "log.dir": "/tmp/kafka_server_3_logs", + "default.replication.factor": "3", + "num.partitions": "1", + "log_filename": "kafka_server_9093.log", + "config_filename": "kafka_server_9093.properties" + }, + { + "entity_id": "4", + "topic": "test_1", 
+ "threads": "5", + "compression-codec": "1", + "message-size": "500", + "message": "100", + "request-num-acks": "1", + "sync":"true", + "log_filename": "producer_performance.log", + "config_filename": "producer_performance.properties" + }, + { + "entity_id": "5", + "topic": "test_1", + "group.id": "mytestgroup", + "consumer-timeout-ms": "10000", + "zookeeper": "localhost:2188", + "log_filename": "console_consumer.log", + "config_filename": "console_consumer.properties" + } + ] +} diff --git a/system_test/replication_testsuite/testcase_10104/testcase_0104_properties.json b/system_test/replication_testsuite/testcase_10104/testcase_0104_properties.json deleted file mode 100644 index 15827eb..0000000 --- a/system_test/replication_testsuite/testcase_10104/testcase_0104_properties.json +++ /dev/null @@ -1,86 +0,0 @@ -{ - "description": {"01":"Leader Failure in Replication : 1. mode => async; 2. comp => 0", - "02":"Produce and consume messages to a single topic - single partition.", - "03":"This test sends messages to 3 replicas", - "04":"To trigger leader election: find the leader and terminate by controlled failure (kill -15)", - "05":"Restart the terminated broker", - "06":"Lookup brokers' log4j messages and verify that leader is re-elected successfully", - "07":"At the end it verifies the log size and contents", - "08":"Use a consumer to verify no message loss.", - "09":"Producer dimensions : mode:async, acks:-1, comp:1", - "10":"Log segment size : 20480" - }, - "testcase_args": { - "broker_type": "leader", - "bounce_broker": "true", - "replica_factor": "3", - "num_partition": "1", - "num_iteration": "1", - "sleep_seconds_between_producer_calls": "1", - "message_producing_free_time_sec": "15", - "num_messages_to_produce_per_producer_call": "50" - }, - "entities": [ - { - "entity_id": "0", - "clientPort": "2188", - "dataDir": "/tmp/zookeeper_0", - "log_filename": "zookeeper_2188.log", - "config_filename": "zookeeper_2188.properties" - }, - { - "entity_id": "1", - 
"port": "9091", - "broker.id": "1", - "log.segment.bytes": "20480", - "log.dir": "/tmp/kafka_server_1_logs", - "default.replication.factor": "3", - "num.partitions": "1", - "log_filename": "kafka_server_9091.log", - "config_filename": "kafka_server_9091.properties" - }, - { - "entity_id": "2", - "port": "9092", - "broker.id": "2", - "log.segment.bytes": "20480", - "log.dir": "/tmp/kafka_server_2_logs", - "default.replication.factor": "3", - "num.partitions": "1", - "log_filename": "kafka_server_9092.log", - "config_filename": "kafka_server_9092.properties" - }, - { - "entity_id": "3", - "port": "9093", - "broker.id": "3", - "log.segment.bytes": "20480", - "log.dir": "/tmp/kafka_server_3_logs", - "default.replication.factor": "3", - "num.partitions": "1", - "log_filename": "kafka_server_9093.log", - "config_filename": "kafka_server_9093.properties" - }, - { - "entity_id": "4", - "topic": "test_1", - "threads": "5", - "compression-codec": "1", - "message-size": "500", - "message": "100", - "request-num-acks": "-1", - "sync":"false", - "log_filename": "producer_performance.log", - "config_filename": "producer_performance.properties" - }, - { - "entity_id": "5", - "topic": "test_1", - "group.id": "mytestgroup", - "consumer-timeout-ms": "10000", - "zookeeper": "localhost:2188", - "log_filename": "console_consumer.log", - "config_filename": "console_consumer.properties" - } - ] -} diff --git a/system_test/replication_testsuite/testcase_10104/testcase_10104_properties.json b/system_test/replication_testsuite/testcase_10104/testcase_10104_properties.json new file mode 100644 index 0000000..15827eb --- /dev/null +++ b/system_test/replication_testsuite/testcase_10104/testcase_10104_properties.json @@ -0,0 +1,86 @@ +{ + "description": {"01":"Leader Failure in Replication : 1. mode => async; 2. 
comp => 0", + "02":"Produce and consume messages to a single topic - single partition.", + "03":"This test sends messages to 3 replicas", + "04":"To trigger leader election: find the leader and terminate by controlled failure (kill -15)", + "05":"Restart the terminated broker", + "06":"Lookup brokers' log4j messages and verify that leader is re-elected successfully", + "07":"At the end it verifies the log size and contents", + "08":"Use a consumer to verify no message loss.", + "09":"Producer dimensions : mode:async, acks:-1, comp:1", + "10":"Log segment size : 20480" + }, + "testcase_args": { + "broker_type": "leader", + "bounce_broker": "true", + "replica_factor": "3", + "num_partition": "1", + "num_iteration": "1", + "sleep_seconds_between_producer_calls": "1", + "message_producing_free_time_sec": "15", + "num_messages_to_produce_per_producer_call": "50" + }, + "entities": [ + { + "entity_id": "0", + "clientPort": "2188", + "dataDir": "/tmp/zookeeper_0", + "log_filename": "zookeeper_2188.log", + "config_filename": "zookeeper_2188.properties" + }, + { + "entity_id": "1", + "port": "9091", + "broker.id": "1", + "log.segment.bytes": "20480", + "log.dir": "/tmp/kafka_server_1_logs", + "default.replication.factor": "3", + "num.partitions": "1", + "log_filename": "kafka_server_9091.log", + "config_filename": "kafka_server_9091.properties" + }, + { + "entity_id": "2", + "port": "9092", + "broker.id": "2", + "log.segment.bytes": "20480", + "log.dir": "/tmp/kafka_server_2_logs", + "default.replication.factor": "3", + "num.partitions": "1", + "log_filename": "kafka_server_9092.log", + "config_filename": "kafka_server_9092.properties" + }, + { + "entity_id": "3", + "port": "9093", + "broker.id": "3", + "log.segment.bytes": "20480", + "log.dir": "/tmp/kafka_server_3_logs", + "default.replication.factor": "3", + "num.partitions": "1", + "log_filename": "kafka_server_9093.log", + "config_filename": "kafka_server_9093.properties" + }, + { + "entity_id": "4", + "topic": 
"test_1", + "threads": "5", + "compression-codec": "1", + "message-size": "500", + "message": "100", + "request-num-acks": "-1", + "sync":"false", + "log_filename": "producer_performance.log", + "config_filename": "producer_performance.properties" + }, + { + "entity_id": "5", + "topic": "test_1", + "group.id": "mytestgroup", + "consumer-timeout-ms": "10000", + "zookeeper": "localhost:2188", + "log_filename": "console_consumer.log", + "config_filename": "console_consumer.properties" + } + ] +} diff --git a/system_test/replication_testsuite/testcase_10105/testcase_0105_properties.json b/system_test/replication_testsuite/testcase_10105/testcase_0105_properties.json deleted file mode 100644 index d1fa1ad..0000000 --- a/system_test/replication_testsuite/testcase_10105/testcase_0105_properties.json +++ /dev/null @@ -1,86 +0,0 @@ -{ - "description": {"01":"Leader Failure in Replication : 1. mode => async; 2. acks => 1; 3. comp => 1", - "02":"Produce and consume messages to a single topic - single partition.", - "03":"This test sends messages to 3 replicas", - "04":"To trigger leader election: find the leader and terminate by controlled failure (kill -15)", - "05":"Restart the terminated broker", - "06":"Lookup brokers' log4j messages and verify that leader is re-elected successfully", - "07":"At the end it verifies the log size and contents", - "08":"Use a consumer to verify no message loss.", - "09":"Producer dimensions : mode:async, acks:1, comp:1", - "10":"Log segment size : 20480" - }, - "testcase_args": { - "broker_type": "leader", - "bounce_broker": "true", - "replica_factor": "3", - "num_partition": "1", - "num_iteration": "1", - "sleep_seconds_between_producer_calls": "1", - "message_producing_free_time_sec": "15", - "num_messages_to_produce_per_producer_call": "50" - }, - "entities": [ - { - "entity_id": "0", - "clientPort": "2188", - "dataDir": "/tmp/zookeeper_0", - "log_filename": "zookeeper_2188.log", - "config_filename": "zookeeper_2188.properties" - }, - { - 
"entity_id": "1", - "port": "9091", - "broker.id": "1", - "log.segment.bytes": "20480", - "log.dir": "/tmp/kafka_server_1_logs", - "default.replication.factor": "3", - "num.partitions": "1", - "log_filename": "kafka_server_9091.log", - "config_filename": "kafka_server_9091.properties" - }, - { - "entity_id": "2", - "port": "9092", - "broker.id": "2", - "log.segment.bytes": "20480", - "log.dir": "/tmp/kafka_server_2_logs", - "default.replication.factor": "3", - "num.partitions": "1", - "log_filename": "kafka_server_9092.log", - "config_filename": "kafka_server_9092.properties" - }, - { - "entity_id": "3", - "port": "9093", - "broker.id": "3", - "log.segment.bytes": "20480", - "log.dir": "/tmp/kafka_server_3_logs", - "default.replication.factor": "3", - "num.partitions": "1", - "log_filename": "kafka_server_9093.log", - "config_filename": "kafka_server_9093.properties" - }, - { - "entity_id": "4", - "topic": "test_1", - "threads": "5", - "compression-codec": "1", - "message-size": "500", - "message": "100", - "request-num-acks": "1", - "sync":"false", - "log_filename": "producer_performance.log", - "config_filename": "producer_performance.properties" - }, - { - "entity_id": "5", - "topic": "test_1", - "group.id": "mytestgroup", - "consumer-timeout-ms": "10000", - "zookeeper": "localhost:2188", - "log_filename": "console_consumer.log", - "config_filename": "console_consumer.properties" - } - ] -} diff --git a/system_test/replication_testsuite/testcase_10105/testcase_10105_properties.json b/system_test/replication_testsuite/testcase_10105/testcase_10105_properties.json new file mode 100644 index 0000000..d1fa1ad --- /dev/null +++ b/system_test/replication_testsuite/testcase_10105/testcase_10105_properties.json @@ -0,0 +1,86 @@ +{ + "description": {"01":"Leader Failure in Replication : 1. mode => async; 2. acks => 1; 3. 
comp => 1", + "02":"Produce and consume messages to a single topic - single partition.", + "03":"This test sends messages to 3 replicas", + "04":"To trigger leader election: find the leader and terminate by controlled failure (kill -15)", + "05":"Restart the terminated broker", + "06":"Lookup brokers' log4j messages and verify that leader is re-elected successfully", + "07":"At the end it verifies the log size and contents", + "08":"Use a consumer to verify no message loss.", + "09":"Producer dimensions : mode:async, acks:1, comp:1", + "10":"Log segment size : 20480" + }, + "testcase_args": { + "broker_type": "leader", + "bounce_broker": "true", + "replica_factor": "3", + "num_partition": "1", + "num_iteration": "1", + "sleep_seconds_between_producer_calls": "1", + "message_producing_free_time_sec": "15", + "num_messages_to_produce_per_producer_call": "50" + }, + "entities": [ + { + "entity_id": "0", + "clientPort": "2188", + "dataDir": "/tmp/zookeeper_0", + "log_filename": "zookeeper_2188.log", + "config_filename": "zookeeper_2188.properties" + }, + { + "entity_id": "1", + "port": "9091", + "broker.id": "1", + "log.segment.bytes": "20480", + "log.dir": "/tmp/kafka_server_1_logs", + "default.replication.factor": "3", + "num.partitions": "1", + "log_filename": "kafka_server_9091.log", + "config_filename": "kafka_server_9091.properties" + }, + { + "entity_id": "2", + "port": "9092", + "broker.id": "2", + "log.segment.bytes": "20480", + "log.dir": "/tmp/kafka_server_2_logs", + "default.replication.factor": "3", + "num.partitions": "1", + "log_filename": "kafka_server_9092.log", + "config_filename": "kafka_server_9092.properties" + }, + { + "entity_id": "3", + "port": "9093", + "broker.id": "3", + "log.segment.bytes": "20480", + "log.dir": "/tmp/kafka_server_3_logs", + "default.replication.factor": "3", + "num.partitions": "1", + "log_filename": "kafka_server_9093.log", + "config_filename": "kafka_server_9093.properties" + }, + { + "entity_id": "4", + "topic": 
"test_1", + "threads": "5", + "compression-codec": "1", + "message-size": "500", + "message": "100", + "request-num-acks": "1", + "sync":"false", + "log_filename": "producer_performance.log", + "config_filename": "producer_performance.properties" + }, + { + "entity_id": "5", + "topic": "test_1", + "group.id": "mytestgroup", + "consumer-timeout-ms": "10000", + "zookeeper": "localhost:2188", + "log_filename": "console_consumer.log", + "config_filename": "console_consumer.properties" + } + ] +} diff --git a/system_test/replication_testsuite/testcase_10106/testcase_0106_properties.json b/system_test/replication_testsuite/testcase_10106/testcase_0106_properties.json deleted file mode 100644 index 675c76f..0000000 --- a/system_test/replication_testsuite/testcase_10106/testcase_0106_properties.json +++ /dev/null @@ -1,86 +0,0 @@ -{ - "description": {"01":"Leader Failure in Replication : 1. comp => 1; 2. no of partion => 3", - "02":"Produce and consume messages to a single topic - 3 partition.", - "03":"This test sends messages to 3 replicas", - "04":"To trigger leader election: find the leader and terminate by controlled failure (kill -15)", - "05":"Restart the terminated broker", - "06":"Lookup brokers' log4j messages and verify that leader is re-elected successfully", - "07":"At the end it verifies the log size and contents", - "08":"Use a consumer to verify no message loss.", - "09":"Producer dimensions : mode:sync, acks:-1, comp:1", - "10":"Log segment size : 20480" - }, - "testcase_args": { - "broker_type": "leader", - "bounce_broker": "true", - "replica_factor": "3", - "num_partition": "3", - "num_iteration": "1", - "sleep_seconds_between_producer_calls": "1", - "message_producing_free_time_sec": "15", - "num_messages_to_produce_per_producer_call": "50" - }, - "entities": [ - { - "entity_id": "0", - "clientPort": "2188", - "dataDir": "/tmp/zookeeper_0", - "log_filename": "zookeeper_2188.log", - "config_filename": "zookeeper_2188.properties" - }, - { - "entity_id": 
"1", - "port": "9091", - "broker.id": "1", - "log.segment.bytes": "20480", - "log.dir": "/tmp/kafka_server_1_logs", - "default.replication.factor": "3", - "num.partitions": "3", - "log_filename": "kafka_server_9091.log", - "config_filename": "kafka_server_9091.properties" - }, - { - "entity_id": "2", - "port": "9092", - "broker.id": "2", - "log.segment.bytes": "20480", - "log.dir": "/tmp/kafka_server_2_logs", - "default.replication.factor": "3", - "num.partitions": "3", - "log_filename": "kafka_server_9092.log", - "config_filename": "kafka_server_9092.properties" - }, - { - "entity_id": "3", - "port": "9093", - "broker.id": "3", - "log.segment.bytes": "20480", - "log.dir": "/tmp/kafka_server_3_logs", - "default.replication.factor": "3", - "num.partitions": "3", - "log_filename": "kafka_server_9093.log", - "config_filename": "kafka_server_9093.properties" - }, - { - "entity_id": "4", - "topic": "test_1", - "threads": "5", - "compression-codec": "1", - "message-size": "500", - "message": "100", - "request-num-acks": "-1", - "sync":"true", - "log_filename": "producer_performance.log", - "config_filename": "producer_performance.properties" - }, - { - "entity_id": "5", - "topic": "test_1", - "group.id": "mytestgroup", - "consumer-timeout-ms": "10000", - "zookeeper": "localhost:2188", - "log_filename": "console_consumer.log", - "config_filename": "console_consumer.properties" - } - ] -} diff --git a/system_test/replication_testsuite/testcase_10106/testcase_10106_properties.json b/system_test/replication_testsuite/testcase_10106/testcase_10106_properties.json new file mode 100644 index 0000000..675c76f --- /dev/null +++ b/system_test/replication_testsuite/testcase_10106/testcase_10106_properties.json @@ -0,0 +1,86 @@ +{ + "description": {"01":"Leader Failure in Replication : 1. comp => 1; 2. 
no of partition => 3", + "02":"Produce and consume messages to a single topic - 3 partition.", + "03":"This test sends messages to 3 replicas", + "04":"To trigger leader election: find the leader and terminate by controlled failure (kill -15)", + "05":"Restart the terminated broker", + "06":"Lookup brokers' log4j messages and verify that leader is re-elected successfully", + "07":"At the end it verifies the log size and contents", + "08":"Use a consumer to verify no message loss.", + "09":"Producer dimensions : mode:sync, acks:-1, comp:1", + "10":"Log segment size : 20480" + }, + "testcase_args": { + "broker_type": "leader", + "bounce_broker": "true", + "replica_factor": "3", + "num_partition": "3", + "num_iteration": "1", + "sleep_seconds_between_producer_calls": "1", + "message_producing_free_time_sec": "15", + "num_messages_to_produce_per_producer_call": "50" + }, + "entities": [ + { + "entity_id": "0", + "clientPort": "2188", + "dataDir": "/tmp/zookeeper_0", + "log_filename": "zookeeper_2188.log", + "config_filename": "zookeeper_2188.properties" + }, + { + "entity_id": "1", + "port": "9091", + "broker.id": "1", + "log.segment.bytes": "20480", + "log.dir": "/tmp/kafka_server_1_logs", + "default.replication.factor": "3", + "num.partitions": "3", + "log_filename": "kafka_server_9091.log", + "config_filename": "kafka_server_9091.properties" + }, + { + "entity_id": "2", + "port": "9092", + "broker.id": "2", + "log.segment.bytes": "20480", + "log.dir": "/tmp/kafka_server_2_logs", + "default.replication.factor": "3", + "num.partitions": "3", + "log_filename": "kafka_server_9092.log", + "config_filename": "kafka_server_9092.properties" + }, + { + "entity_id": "3", + "port": "9093", + "broker.id": "3", + "log.segment.bytes": "20480", + "log.dir": "/tmp/kafka_server_3_logs", + "default.replication.factor": "3", + "num.partitions": "3", + "log_filename": "kafka_server_9093.log", + "config_filename": "kafka_server_9093.properties" + }, + { + "entity_id": "4", + "topic": 
"test_1", + "threads": "5", + "compression-codec": "1", + "message-size": "500", + "message": "100", + "request-num-acks": "-1", + "sync":"true", + "log_filename": "producer_performance.log", + "config_filename": "producer_performance.properties" + }, + { + "entity_id": "5", + "topic": "test_1", + "group.id": "mytestgroup", + "consumer-timeout-ms": "10000", + "zookeeper": "localhost:2188", + "log_filename": "console_consumer.log", + "config_filename": "console_consumer.properties" + } + ] +} diff --git a/system_test/replication_testsuite/testcase_10107/testcase_0107_properties.json b/system_test/replication_testsuite/testcase_10107/testcase_0107_properties.json deleted file mode 100644 index afc221c..0000000 --- a/system_test/replication_testsuite/testcase_10107/testcase_0107_properties.json +++ /dev/null @@ -1,86 +0,0 @@ -{ - "description": {"01":"Leader Failure in Replication : 1. mode => async; 2. comp => 1; 3. no of partition => 3", - "02":"Produce and consume messages to a single topic - 3 partition.", - "03":"This test sends messages to 3 replicas", - "04":"To trigger leader election: find the leader and terminate by controlled failure (kill -15)", - "05":"Restart the terminated broker", - "06":"Lookup brokers' log4j messages and verify that leader is re-elected successfully", - "07":"At the end it verifies the log size and contents", - "08":"Use a consumer to verify no message loss.", - "09":"Producer dimensions : mode:async, acks:-1, comp:1", - "10":"Log segment size : 20480" - }, - "testcase_args": { - "broker_type": "leader", - "bounce_broker": "true", - "replica_factor": "3", - "num_partition": "3", - "num_iteration": "1", - "sleep_seconds_between_producer_calls": "1", - "message_producing_free_time_sec": "15", - "num_messages_to_produce_per_producer_call": "50" - }, - "entities": [ - { - "entity_id": "0", - "clientPort": "2188", - "dataDir": "/tmp/zookeeper_0", - "log_filename": "zookeeper_2188.log", - "config_filename": "zookeeper_2188.properties" - }, 
- { - "entity_id": "1", - "port": "9091", - "broker.id": "1", - "log.segment.bytes": "20480", - "log.dir": "/tmp/kafka_server_1_logs", - "default.replication.factor": "3", - "num.partitions": "3", - "log_filename": "kafka_server_9091.log", - "config_filename": "kafka_server_9091.properties" - }, - { - "entity_id": "2", - "port": "9092", - "broker.id": "2", - "log.segment.bytes": "20480", - "log.dir": "/tmp/kafka_server_2_logs", - "default.replication.factor": "3", - "num.partitions": "3", - "log_filename": "kafka_server_9092.log", - "config_filename": "kafka_server_9092.properties" - }, - { - "entity_id": "3", - "port": "9093", - "broker.id": "3", - "log.segment.bytes": "20480", - "log.dir": "/tmp/kafka_server_3_logs", - "default.replication.factor": "3", - "num.partitions": "3", - "log_filename": "kafka_server_9093.log", - "config_filename": "kafka_server_9093.properties" - }, - { - "entity_id": "4", - "topic": "test_1", - "threads": "5", - "compression-codec": "1", - "message-size": "500", - "message": "100", - "request-num-acks": "-1", - "sync":"false", - "log_filename": "producer_performance.log", - "config_filename": "producer_performance.properties" - }, - { - "entity_id": "5", - "topic": "test_1", - "group.id": "mytestgroup", - "consumer-timeout-ms": "10000", - "zookeeper": "localhost:2188", - "log_filename": "console_consumer.log", - "config_filename": "console_consumer.properties" - } - ] -} diff --git a/system_test/replication_testsuite/testcase_10107/testcase_10107_properties.json b/system_test/replication_testsuite/testcase_10107/testcase_10107_properties.json new file mode 100644 index 0000000..afc221c --- /dev/null +++ b/system_test/replication_testsuite/testcase_10107/testcase_10107_properties.json @@ -0,0 +1,86 @@ +{ + "description": {"01":"Leader Failure in Replication : 1. mode => async; 2. comp => 1; 3. 
no of partition => 3", + "02":"Produce and consume messages to a single topic - 3 partition.", + "03":"This test sends messages to 3 replicas", + "04":"To trigger leader election: find the leader and terminate by controlled failure (kill -15)", + "05":"Restart the terminated broker", + "06":"Lookup brokers' log4j messages and verify that leader is re-elected successfully", + "07":"At the end it verifies the log size and contents", + "08":"Use a consumer to verify no message loss.", + "09":"Producer dimensions : mode:async, acks:-1, comp:1", + "10":"Log segment size : 20480" + }, + "testcase_args": { + "broker_type": "leader", + "bounce_broker": "true", + "replica_factor": "3", + "num_partition": "3", + "num_iteration": "1", + "sleep_seconds_between_producer_calls": "1", + "message_producing_free_time_sec": "15", + "num_messages_to_produce_per_producer_call": "50" + }, + "entities": [ + { + "entity_id": "0", + "clientPort": "2188", + "dataDir": "/tmp/zookeeper_0", + "log_filename": "zookeeper_2188.log", + "config_filename": "zookeeper_2188.properties" + }, + { + "entity_id": "1", + "port": "9091", + "broker.id": "1", + "log.segment.bytes": "20480", + "log.dir": "/tmp/kafka_server_1_logs", + "default.replication.factor": "3", + "num.partitions": "3", + "log_filename": "kafka_server_9091.log", + "config_filename": "kafka_server_9091.properties" + }, + { + "entity_id": "2", + "port": "9092", + "broker.id": "2", + "log.segment.bytes": "20480", + "log.dir": "/tmp/kafka_server_2_logs", + "default.replication.factor": "3", + "num.partitions": "3", + "log_filename": "kafka_server_9092.log", + "config_filename": "kafka_server_9092.properties" + }, + { + "entity_id": "3", + "port": "9093", + "broker.id": "3", + "log.segment.bytes": "20480", + "log.dir": "/tmp/kafka_server_3_logs", + "default.replication.factor": "3", + "num.partitions": "3", + "log_filename": "kafka_server_9093.log", + "config_filename": "kafka_server_9093.properties" + }, + { + "entity_id": "4", + "topic": 
"test_1", + "threads": "5", + "compression-codec": "1", + "message-size": "500", + "message": "100", + "request-num-acks": "-1", + "sync":"false", + "log_filename": "producer_performance.log", + "config_filename": "producer_performance.properties" + }, + { + "entity_id": "5", + "topic": "test_1", + "group.id": "mytestgroup", + "consumer-timeout-ms": "10000", + "zookeeper": "localhost:2188", + "log_filename": "console_consumer.log", + "config_filename": "console_consumer.properties" + } + ] +} diff --git a/system_test/replication_testsuite/testcase_10108/testcase_0108_properties.json b/system_test/replication_testsuite/testcase_10108/testcase_0108_properties.json deleted file mode 100644 index 5df72f3..0000000 --- a/system_test/replication_testsuite/testcase_10108/testcase_0108_properties.json +++ /dev/null @@ -1,86 +0,0 @@ -{ - "description": {"01":"Leader Failure in Replication : 1. acks => 1; 2. comp => 1; 3. no. of partition => 3", - "02":"Produce and consume messages to a single topic - 3 partition.", - "03":"This test sends messages to 3 replicas", - "04":"To trigger leader election: find the leader and terminate by controlled failure (kill -15)", - "05":"Restart the terminated broker", - "06":"Lookup brokers' log4j messages and verify that leader is re-elected successfully", - "07":"At the end it verifies the log size and contents", - "08":"Use a consumer to verify no message loss.", - "09":"Producer dimensions : mode:sync, acks:1, comp:1", - "10":"Log segment size : 20480" - }, - "testcase_args": { - "broker_type": "leader", - "bounce_broker": "true", - "replica_factor": "3", - "num_partition": "3", - "num_iteration": "1", - "sleep_seconds_between_producer_calls": "1", - "message_producing_free_time_sec": "15", - "num_messages_to_produce_per_producer_call": "50" - }, - "entities": [ - { - "entity_id": "0", - "clientPort": "2188", - "dataDir": "/tmp/zookeeper_0", - "log_filename": "zookeeper_2188.log", - "config_filename": "zookeeper_2188.properties" - }, - { 
- "entity_id": "1", - "port": "9091", - "broker.id": "1", - "log.segment.bytes": "20480", - "log.dir": "/tmp/kafka_server_1_logs", - "default.replication.factor": "3", - "num.partitions": "3", - "log_filename": "kafka_server_9091.log", - "config_filename": "kafka_server_9091.properties" - }, - { - "entity_id": "2", - "port": "9092", - "broker.id": "2", - "log.segment.bytes": "20480", - "log.dir": "/tmp/kafka_server_2_logs", - "default.replication.factor": "3", - "num.partitions": "3", - "log_filename": "kafka_server_9092.log", - "config_filename": "kafka_server_9092.properties" - }, - { - "entity_id": "3", - "port": "9093", - "broker.id": "3", - "log.segment.bytes": "20480", - "log.dir": "/tmp/kafka_server_3_logs", - "default.replication.factor": "3", - "num.partitions": "3", - "log_filename": "kafka_server_9093.log", - "config_filename": "kafka_server_9093.properties" - }, - { - "entity_id": "4", - "topic": "test_1", - "threads": "5", - "compression-codec": "1", - "message-size": "500", - "message": "100", - "request-num-acks": "1", - "sync":"true", - "log_filename": "producer_performance.log", - "config_filename": "producer_performance.properties" - }, - { - "entity_id": "5", - "topic": "test_1", - "group.id": "mytestgroup", - "consumer-timeout-ms": "10000", - "zookeeper": "localhost:2188", - "log_filename": "console_consumer.log", - "config_filename": "console_consumer.properties" - } - ] -} diff --git a/system_test/replication_testsuite/testcase_10108/testcase_10108_properties.json b/system_test/replication_testsuite/testcase_10108/testcase_10108_properties.json new file mode 100644 index 0000000..5df72f3 --- /dev/null +++ b/system_test/replication_testsuite/testcase_10108/testcase_10108_properties.json @@ -0,0 +1,86 @@ +{ + "description": {"01":"Leader Failure in Replication : 1. acks => 1; 2. comp => 1; 3. no. 
of partition => 3", + "02":"Produce and consume messages to a single topic - 3 partition.", + "03":"This test sends messages to 3 replicas", + "04":"To trigger leader election: find the leader and terminate by controlled failure (kill -15)", + "05":"Restart the terminated broker", + "06":"Lookup brokers' log4j messages and verify that leader is re-elected successfully", + "07":"At the end it verifies the log size and contents", + "08":"Use a consumer to verify no message loss.", + "09":"Producer dimensions : mode:sync, acks:1, comp:1", + "10":"Log segment size : 20480" + }, + "testcase_args": { + "broker_type": "leader", + "bounce_broker": "true", + "replica_factor": "3", + "num_partition": "3", + "num_iteration": "1", + "sleep_seconds_between_producer_calls": "1", + "message_producing_free_time_sec": "15", + "num_messages_to_produce_per_producer_call": "50" + }, + "entities": [ + { + "entity_id": "0", + "clientPort": "2188", + "dataDir": "/tmp/zookeeper_0", + "log_filename": "zookeeper_2188.log", + "config_filename": "zookeeper_2188.properties" + }, + { + "entity_id": "1", + "port": "9091", + "broker.id": "1", + "log.segment.bytes": "20480", + "log.dir": "/tmp/kafka_server_1_logs", + "default.replication.factor": "3", + "num.partitions": "3", + "log_filename": "kafka_server_9091.log", + "config_filename": "kafka_server_9091.properties" + }, + { + "entity_id": "2", + "port": "9092", + "broker.id": "2", + "log.segment.bytes": "20480", + "log.dir": "/tmp/kafka_server_2_logs", + "default.replication.factor": "3", + "num.partitions": "3", + "log_filename": "kafka_server_9092.log", + "config_filename": "kafka_server_9092.properties" + }, + { + "entity_id": "3", + "port": "9093", + "broker.id": "3", + "log.segment.bytes": "20480", + "log.dir": "/tmp/kafka_server_3_logs", + "default.replication.factor": "3", + "num.partitions": "3", + "log_filename": "kafka_server_9093.log", + "config_filename": "kafka_server_9093.properties" + }, + { + "entity_id": "4", + "topic": 
"test_1", + "threads": "5", + "compression-codec": "1", + "message-size": "500", + "message": "100", + "request-num-acks": "1", + "sync":"true", + "log_filename": "producer_performance.log", + "config_filename": "producer_performance.properties" + }, + { + "entity_id": "5", + "topic": "test_1", + "group.id": "mytestgroup", + "consumer-timeout-ms": "10000", + "zookeeper": "localhost:2188", + "log_filename": "console_consumer.log", + "config_filename": "console_consumer.properties" + } + ] +} diff --git a/system_test/replication_testsuite/testcase_10109/testcase_0109_properties.json b/system_test/replication_testsuite/testcase_10109/testcase_0109_properties.json deleted file mode 100644 index 9b15668..0000000 --- a/system_test/replication_testsuite/testcase_10109/testcase_0109_properties.json +++ /dev/null @@ -1,86 +0,0 @@ -{ - "description": {"01":"Leader Failure in Replication : 1. mode => async; 2. acks => 1; 3. comp =>; 4. no. of partitions => 3", - "02":"Produce and consume messages to a single topic - 3 partition.", - "03":"This test sends messages to 3 replicas", - "04":"To trigger leader election: find the leader and terminate by controlled failure (kill -15)", - "05":"Restart the terminated broker", - "06":"Lookup brokers' log4j messages and verify that leader is re-elected successfully", - "07":"At the end it verifies the log size and contents", - "08":"Use a consumer to verify no message loss.", - "09":"Producer dimensions : mode:async, acks:1, comp:1", - "10":"Log segment size : 20480" - }, - "testcase_args": { - "broker_type": "leader", - "bounce_broker": "true", - "replica_factor": "3", - "num_partition": "3", - "num_iteration": "1", - "sleep_seconds_between_producer_calls": "1", - "message_producing_free_time_sec": "15", - "num_messages_to_produce_per_producer_call": "50" - }, - "entities": [ - { - "entity_id": "0", - "clientPort": "2188", - "dataDir": "/tmp/zookeeper_0", - "log_filename": "zookeeper_2188.log", - "config_filename": 
"zookeeper_2188.properties" - }, - { - "entity_id": "1", - "port": "9091", - "broker.id": "1", - "log.segment.bytes": "20480", - "log.dir": "/tmp/kafka_server_1_logs", - "default.replication.factor": "3", - "num.partitions": "3", - "log_filename": "kafka_server_9091.log", - "config_filename": "kafka_server_9091.properties" - }, - { - "entity_id": "2", - "port": "9092", - "broker.id": "2", - "log.segment.bytes": "20480", - "log.dir": "/tmp/kafka_server_2_logs", - "default.replication.factor": "3", - "num.partitions": "3", - "log_filename": "kafka_server_9092.log", - "config_filename": "kafka_server_9092.properties" - }, - { - "entity_id": "3", - "port": "9093", - "broker.id": "3", - "log.segment.bytes": "20480", - "log.dir": "/tmp/kafka_server_3_logs", - "default.replication.factor": "3", - "num.partitions": "3", - "log_filename": "kafka_server_9093.log", - "config_filename": "kafka_server_9093.properties" - }, - { - "entity_id": "4", - "topic": "test_1", - "threads": "5", - "compression-codec": "1", - "message-size": "500", - "message": "100", - "request-num-acks": "1", - "sync":"false", - "log_filename": "producer_performance.log", - "config_filename": "producer_performance.properties" - }, - { - "entity_id": "5", - "topic": "test_1", - "group.id": "mytestgroup", - "consumer-timeout-ms": "10000", - "zookeeper": "localhost:2188", - "log_filename": "console_consumer.log", - "config_filename": "console_consumer.properties" - } - ] -} diff --git a/system_test/replication_testsuite/testcase_10109/testcase_10109_properties.json b/system_test/replication_testsuite/testcase_10109/testcase_10109_properties.json new file mode 100644 index 0000000..9b15668 --- /dev/null +++ b/system_test/replication_testsuite/testcase_10109/testcase_10109_properties.json @@ -0,0 +1,86 @@ +{ + "description": {"01":"Leader Failure in Replication : 1. mode => async; 2. acks => 1; 3. comp =>; 4. no. 
of partitions => 3", + "02":"Produce and consume messages to a single topic - 3 partition.", + "03":"This test sends messages to 3 replicas", + "04":"To trigger leader election: find the leader and terminate by controlled failure (kill -15)", + "05":"Restart the terminated broker", + "06":"Lookup brokers' log4j messages and verify that leader is re-elected successfully", + "07":"At the end it verifies the log size and contents", + "08":"Use a consumer to verify no message loss.", + "09":"Producer dimensions : mode:async, acks:1, comp:1", + "10":"Log segment size : 20480" + }, + "testcase_args": { + "broker_type": "leader", + "bounce_broker": "true", + "replica_factor": "3", + "num_partition": "3", + "num_iteration": "1", + "sleep_seconds_between_producer_calls": "1", + "message_producing_free_time_sec": "15", + "num_messages_to_produce_per_producer_call": "50" + }, + "entities": [ + { + "entity_id": "0", + "clientPort": "2188", + "dataDir": "/tmp/zookeeper_0", + "log_filename": "zookeeper_2188.log", + "config_filename": "zookeeper_2188.properties" + }, + { + "entity_id": "1", + "port": "9091", + "broker.id": "1", + "log.segment.bytes": "20480", + "log.dir": "/tmp/kafka_server_1_logs", + "default.replication.factor": "3", + "num.partitions": "3", + "log_filename": "kafka_server_9091.log", + "config_filename": "kafka_server_9091.properties" + }, + { + "entity_id": "2", + "port": "9092", + "broker.id": "2", + "log.segment.bytes": "20480", + "log.dir": "/tmp/kafka_server_2_logs", + "default.replication.factor": "3", + "num.partitions": "3", + "log_filename": "kafka_server_9092.log", + "config_filename": "kafka_server_9092.properties" + }, + { + "entity_id": "3", + "port": "9093", + "broker.id": "3", + "log.segment.bytes": "20480", + "log.dir": "/tmp/kafka_server_3_logs", + "default.replication.factor": "3", + "num.partitions": "3", + "log_filename": "kafka_server_9093.log", + "config_filename": "kafka_server_9093.properties" + }, + { + "entity_id": "4", + "topic": 
"test_1", + "threads": "5", + "compression-codec": "1", + "message-size": "500", + "message": "100", + "request-num-acks": "1", + "sync":"false", + "log_filename": "producer_performance.log", + "config_filename": "producer_performance.properties" + }, + { + "entity_id": "5", + "topic": "test_1", + "group.id": "mytestgroup", + "consumer-timeout-ms": "10000", + "zookeeper": "localhost:2188", + "log_filename": "console_consumer.log", + "config_filename": "console_consumer.properties" + } + ] +} diff --git a/system_test/replication_testsuite/testcase_10110/testcase_0110_properties.json b/system_test/replication_testsuite/testcase_10110/testcase_0110_properties.json deleted file mode 100644 index f51abc1..0000000 --- a/system_test/replication_testsuite/testcase_10110/testcase_0110_properties.json +++ /dev/null @@ -1,86 +0,0 @@ -{ - "description": {"01":"Leader Failure in Replication : 1. mode => async; 2. acks => 1; 3. comp =>; 4. no. of partitins => 3; 5. log segment size => 1M", - "02":"Produce and consume messages to a single topic - 3 partition.", - "03":"This test sends messages to 3 replicas", - "04":"To trigger leader election: find the leader and terminate by controlled failure (kill -15)", - "05":"Restart the terminated broker", - "06":"Lookup brokers' log4j messages and verify that leader is re-elected successfully", - "07":"At the end it verifies the log size and contents", - "08":"Use a consumer to verify no message loss.", - "09":"Producer dimensions : mode:async, acks:1, comp:1", - "10":"Log segment size : 1048576 (1M)" - }, - "testcase_args": { - "broker_type": "leader", - "bounce_broker": "true", - "replica_factor": "3", - "num_partition": "3", - "num_iteration": "1", - "sleep_seconds_between_producer_calls": "1", - "message_producing_free_time_sec": "15", - "num_messages_to_produce_per_producer_call": "50" - }, - "entities": [ - { - "entity_id": "0", - "clientPort": "2188", - "dataDir": "/tmp/zookeeper_0", - "log_filename": "zookeeper_2188.log", - 
"config_filename": "zookeeper_2188.properties" - }, - { - "entity_id": "1", - "port": "9091", - "broker.id": "1", - "log.segment.bytes": "1048576", - "log.dir": "/tmp/kafka_server_1_logs", - "default.replication.factor": "3", - "num.partitions": "3", - "log_filename": "kafka_server_9091.log", - "config_filename": "kafka_server_9091.properties" - }, - { - "entity_id": "2", - "port": "9092", - "broker.id": "2", - "log.segment.bytes": "1048576", - "log.dir": "/tmp/kafka_server_2_logs", - "default.replication.factor": "3", - "num.partitions": "3", - "log_filename": "kafka_server_9092.log", - "config_filename": "kafka_server_9092.properties" - }, - { - "entity_id": "3", - "port": "9093", - "broker.id": "3", - "log.segment.bytes": "1048576", - "log.dir": "/tmp/kafka_server_3_logs", - "default.replication.factor": "3", - "num.partitions": "3", - "log_filename": "kafka_server_9093.log", - "config_filename": "kafka_server_9093.properties" - }, - { - "entity_id": "4", - "topic": "test_1", - "threads": "5", - "compression-codec": "1", - "message-size": "500", - "message": "100", - "request-num-acks": "1", - "sync":"false", - "log_filename": "producer_performance.log", - "config_filename": "producer_performance.properties" - }, - { - "entity_id": "5", - "topic": "test_1", - "group.id": "mytestgroup", - "consumer-timeout-ms": "10000", - "zookeeper": "localhost:2188", - "log_filename": "console_consumer.log", - "config_filename": "console_consumer.properties" - } - ] -} diff --git a/system_test/replication_testsuite/testcase_10110/testcase_10110_properties.json b/system_test/replication_testsuite/testcase_10110/testcase_10110_properties.json new file mode 100644 index 0000000..f51abc1 --- /dev/null +++ b/system_test/replication_testsuite/testcase_10110/testcase_10110_properties.json @@ -0,0 +1,86 @@ +{ + "description": {"01":"Leader Failure in Replication : 1. mode => async; 2. acks => 1; 3. comp =>; 4. no. of partitins => 3; 5. 
log segment size => 1M", + "02":"Produce and consume messages to a single topic - 3 partition.", + "03":"This test sends messages to 3 replicas", + "04":"To trigger leader election: find the leader and terminate by controlled failure (kill -15)", + "05":"Restart the terminated broker", + "06":"Lookup brokers' log4j messages and verify that leader is re-elected successfully", + "07":"At the end it verifies the log size and contents", + "08":"Use a consumer to verify no message loss.", + "09":"Producer dimensions : mode:async, acks:1, comp:1", + "10":"Log segment size : 1048576 (1M)" + }, + "testcase_args": { + "broker_type": "leader", + "bounce_broker": "true", + "replica_factor": "3", + "num_partition": "3", + "num_iteration": "1", + "sleep_seconds_between_producer_calls": "1", + "message_producing_free_time_sec": "15", + "num_messages_to_produce_per_producer_call": "50" + }, + "entities": [ + { + "entity_id": "0", + "clientPort": "2188", + "dataDir": "/tmp/zookeeper_0", + "log_filename": "zookeeper_2188.log", + "config_filename": "zookeeper_2188.properties" + }, + { + "entity_id": "1", + "port": "9091", + "broker.id": "1", + "log.segment.bytes": "1048576", + "log.dir": "/tmp/kafka_server_1_logs", + "default.replication.factor": "3", + "num.partitions": "3", + "log_filename": "kafka_server_9091.log", + "config_filename": "kafka_server_9091.properties" + }, + { + "entity_id": "2", + "port": "9092", + "broker.id": "2", + "log.segment.bytes": "1048576", + "log.dir": "/tmp/kafka_server_2_logs", + "default.replication.factor": "3", + "num.partitions": "3", + "log_filename": "kafka_server_9092.log", + "config_filename": "kafka_server_9092.properties" + }, + { + "entity_id": "3", + "port": "9093", + "broker.id": "3", + "log.segment.bytes": "1048576", + "log.dir": "/tmp/kafka_server_3_logs", + "default.replication.factor": "3", + "num.partitions": "3", + "log_filename": "kafka_server_9093.log", + "config_filename": "kafka_server_9093.properties" + }, + { + "entity_id": 
"4", + "topic": "test_1", + "threads": "5", + "compression-codec": "1", + "message-size": "500", + "message": "100", + "request-num-acks": "1", + "sync":"false", + "log_filename": "producer_performance.log", + "config_filename": "producer_performance.properties" + }, + { + "entity_id": "5", + "topic": "test_1", + "group.id": "mytestgroup", + "consumer-timeout-ms": "10000", + "zookeeper": "localhost:2188", + "log_filename": "console_consumer.log", + "config_filename": "console_consumer.properties" + } + ] +} diff --git a/system_test/replication_testsuite/testcase_10131/testcase_0131_properties.json b/system_test/replication_testsuite/testcase_10131/testcase_0131_properties.json deleted file mode 100644 index a140882..0000000 --- a/system_test/replication_testsuite/testcase_10131/testcase_0131_properties.json +++ /dev/null @@ -1,110 +0,0 @@ -{ - "description": {"01":"Leader Failure in Replication with multi topics & partitions : Base Test", - "02":"Produce and consume messages to 2 topics - 3 partitions", - "03":"This test sends messages to 2 replicas", - "04":"To trigger leader election: find the leader and terminate by controlled failure (kill -15)", - "05":"Restart the terminated broker", - "06":"Lookup brokers' log4j messages and verify that leader is re-elected successfully", - "07":"At the end it verifies the log size and contents", - "08":"Use a consumer to verify no message loss.", - "09":"Producer dimensions : mode:sync, acks:-1, comp:0", - "10":"Log segment size : 102400" - }, - "testcase_args": { - "broker_type": "leader", - "auto_create_topic": "true", - "bounce_broker": "true", - "replica_factor": "2", - "num_partition": "3", - "num_iteration": "3", - "sleep_seconds_between_producer_calls": "1", - "message_producing_free_time_sec": "15" - }, - "entities": [ - { - "entity_id": "0", - "clientPort": "2188", - "dataDir": "/tmp/zookeeper_0", - "log_filename": "zookeeper_2188.log", - "config_filename": "zookeeper_2188.properties" - }, - { - "entity_id": "1", - 
"port": "9091", - "broker.id": "1", - "log.segment.bytes": "102400", - "log.dir": "/tmp/kafka_server_1_logs", - "default.replication.factor": "2", - "num.partitions": "3", - "log.index.interval.bytes": "10", - "log_filename": "kafka_server_9091.log", - "config_filename": "kafka_server_9091.properties" - }, - { - "entity_id": "2", - "port": "9092", - "broker.id": "2", - "log.segment.bytes": "102400", - "log.dir": "/tmp/kafka_server_2_logs", - "default.replication.factor": "2", - "num.partitions": "3", - "log.index.interval.bytes": "10", - "log_filename": "kafka_server_9092.log", - "config_filename": "kafka_server_9092.properties" - }, - { - "entity_id": "3", - "port": "9093", - "broker.id": "3", - "log.segment.bytes": "102400", - "log.dir": "/tmp/kafka_server_3_logs", - "default.replication.factor": "2", - "num.partitions": "3", - "log.index.interval.bytes": "10", - "log_filename": "kafka_server_9093.log", - "config_filename": "kafka_server_9093.properties" - }, - { - "entity_id": "4", - "topic": "test_1", - "threads": "5", - "compression-codec": "0", - "message-size": "500", - "message": "100", - "request-num-acks": "-1", - "producer-retry-backoff-ms": "300", - "sync":"true", - "log_filename": "producer_performance_4.log", - "config_filename": "producer_performance_4.properties" - }, - { - "entity_id": "5", - "topic": "test_2", - "threads": "5", - "compression-codec": "0", - "message-size": "500", - "message": "100", - "request-num-acks": "-1", - "producer-retry-backoff-ms": "300", - "sync":"true", - "log_filename": "producer_performance_5.log", - "config_filename": "producer_performance_5.properties" - }, - { - "entity_id": "6", - "topic": "test_1", - "group.id": "mytestgroup", - "consumer-timeout-ms": "10000", - "log_filename": "console_consumer_6.log", - "config_filename": "console_consumer_6.properties" - }, - { - "entity_id": "7", - "topic": "test_2", - "group.id": "mytestgroup", - "consumer-timeout-ms": "10000", - "log_filename": "console_consumer_7.log", - 
"config_filename": "console_consumer_7.properties" - } - ] -} diff --git a/system_test/replication_testsuite/testcase_10131/testcase_10131_properties.json b/system_test/replication_testsuite/testcase_10131/testcase_10131_properties.json new file mode 100644 index 0000000..a140882 --- /dev/null +++ b/system_test/replication_testsuite/testcase_10131/testcase_10131_properties.json @@ -0,0 +1,110 @@ +{ + "description": {"01":"Leader Failure in Replication with multi topics & partitions : Base Test", + "02":"Produce and consume messages to 2 topics - 3 partitions", + "03":"This test sends messages to 2 replicas", + "04":"To trigger leader election: find the leader and terminate by controlled failure (kill -15)", + "05":"Restart the terminated broker", + "06":"Lookup brokers' log4j messages and verify that leader is re-elected successfully", + "07":"At the end it verifies the log size and contents", + "08":"Use a consumer to verify no message loss.", + "09":"Producer dimensions : mode:sync, acks:-1, comp:0", + "10":"Log segment size : 102400" + }, + "testcase_args": { + "broker_type": "leader", + "auto_create_topic": "true", + "bounce_broker": "true", + "replica_factor": "2", + "num_partition": "3", + "num_iteration": "3", + "sleep_seconds_between_producer_calls": "1", + "message_producing_free_time_sec": "15" + }, + "entities": [ + { + "entity_id": "0", + "clientPort": "2188", + "dataDir": "/tmp/zookeeper_0", + "log_filename": "zookeeper_2188.log", + "config_filename": "zookeeper_2188.properties" + }, + { + "entity_id": "1", + "port": "9091", + "broker.id": "1", + "log.segment.bytes": "102400", + "log.dir": "/tmp/kafka_server_1_logs", + "default.replication.factor": "2", + "num.partitions": "3", + "log.index.interval.bytes": "10", + "log_filename": "kafka_server_9091.log", + "config_filename": "kafka_server_9091.properties" + }, + { + "entity_id": "2", + "port": "9092", + "broker.id": "2", + "log.segment.bytes": "102400", + "log.dir": "/tmp/kafka_server_2_logs", + 
"default.replication.factor": "2", + "num.partitions": "3", + "log.index.interval.bytes": "10", + "log_filename": "kafka_server_9092.log", + "config_filename": "kafka_server_9092.properties" + }, + { + "entity_id": "3", + "port": "9093", + "broker.id": "3", + "log.segment.bytes": "102400", + "log.dir": "/tmp/kafka_server_3_logs", + "default.replication.factor": "2", + "num.partitions": "3", + "log.index.interval.bytes": "10", + "log_filename": "kafka_server_9093.log", + "config_filename": "kafka_server_9093.properties" + }, + { + "entity_id": "4", + "topic": "test_1", + "threads": "5", + "compression-codec": "0", + "message-size": "500", + "message": "100", + "request-num-acks": "-1", + "producer-retry-backoff-ms": "300", + "sync":"true", + "log_filename": "producer_performance_4.log", + "config_filename": "producer_performance_4.properties" + }, + { + "entity_id": "5", + "topic": "test_2", + "threads": "5", + "compression-codec": "0", + "message-size": "500", + "message": "100", + "request-num-acks": "-1", + "producer-retry-backoff-ms": "300", + "sync":"true", + "log_filename": "producer_performance_5.log", + "config_filename": "producer_performance_5.properties" + }, + { + "entity_id": "6", + "topic": "test_1", + "group.id": "mytestgroup", + "consumer-timeout-ms": "10000", + "log_filename": "console_consumer_6.log", + "config_filename": "console_consumer_6.properties" + }, + { + "entity_id": "7", + "topic": "test_2", + "group.id": "mytestgroup", + "consumer-timeout-ms": "10000", + "log_filename": "console_consumer_7.log", + "config_filename": "console_consumer_7.properties" + } + ] +} diff --git a/system_test/replication_testsuite/testcase_10132/testcase_0132_properties.json b/system_test/replication_testsuite/testcase_10132/testcase_0132_properties.json deleted file mode 100644 index 48b30c7..0000000 --- a/system_test/replication_testsuite/testcase_10132/testcase_0132_properties.json +++ /dev/null @@ -1,107 +0,0 @@ -{ - "description": {"01":"Leader Failure in 
Replication with multi topics & partitions : 1. acks => 1", - "02":"Produce and consume messages to 2 topics - 3 partitions", - "03":"This test sends messages to 2 replicas", - "04":"To trigger leader election: find the leader and terminate by controlled failure (kill -15)", - "05":"Restart the terminated broker", - "06":"Lookup brokers' log4j messages and verify that leader is re-elected successfully", - "07":"At the end it verifies the log size and contents", - "08":"Use a consumer to verify no message loss.", - "09":"Producer dimensions : mode:sync, acks:1, comp:0", - "10":"Log segment size : 512000" - }, - "testcase_args": { - "broker_type": "leader", - "auto_create_topic": "true", - "bounce_broker": "true", - "replica_factor": "2", - "num_partition": "3", - "num_iteration": "3", - "sleep_seconds_between_producer_calls": "1", - "message_producing_free_time_sec": "15" - }, - "entities": [ - { - "entity_id": "0", - "clientPort": "2188", - "dataDir": "/tmp/zookeeper_0", - "log_filename": "zookeeper_2188.log", - "config_filename": "zookeeper_2188.properties" - }, - { - "entity_id": "1", - "port": "9091", - "broker.id": "1", - "log.segment.bytes": "512000", - "log.dir": "/tmp/kafka_server_1_logs", - "default.replication.factor": "2", - "num.partitions": "3", - "log_filename": "kafka_server_9091.log", - "config_filename": "kafka_server_9091.properties" - }, - { - "entity_id": "2", - "port": "9092", - "broker.id": "2", - "log.segment.bytes": "512000", - "log.dir": "/tmp/kafka_server_2_logs", - "default.replication.factor": "2", - "num.partitions": "3", - "log_filename": "kafka_server_9092.log", - "config_filename": "kafka_server_9092.properties" - }, - { - "entity_id": "3", - "port": "9093", - "broker.id": "3", - "log.segment.bytes": "512000", - "log.dir": "/tmp/kafka_server_3_logs", - "default.replication.factor": "2", - "num.partitions": "3", - "log_filename": "kafka_server_9093.log", - "config_filename": "kafka_server_9093.properties" - }, - { - "entity_id": "4", - 
"topic": "test_1", - "threads": "5", - "compression-codec": "0", - "message-size": "500", - "message": "100", - "request-num-acks": "1", - "producer-retry-backoff-ms": "300", - "sync":"true", - "log_filename": "producer_performance_4.log", - "config_filename": "producer_performance_4.properties" - }, - { - "entity_id": "5", - "topic": "test_2", - "threads": "5", - "compression-codec": "0", - "message-size": "500", - "message": "100", - "request-num-acks": "1", - "producer-retry-backoff-ms": "300", - "sync":"true", - "log_filename": "producer_performance_5.log", - "config_filename": "producer_performance_5.properties" - }, - { - "entity_id": "6", - "topic": "test_1", - "group.id": "mytestgroup", - "consumer-timeout-ms": "10000", - "log_filename": "console_consumer_6.log", - "config_filename": "console_consumer_6.properties" - }, - { - "entity_id": "7", - "topic": "test_2", - "group.id": "mytestgroup", - "consumer-timeout-ms": "10000", - "log_filename": "console_consumer_7.log", - "config_filename": "console_consumer_7.properties" - } - ] -} diff --git a/system_test/replication_testsuite/testcase_10132/testcase_10132_properties.json b/system_test/replication_testsuite/testcase_10132/testcase_10132_properties.json new file mode 100644 index 0000000..48b30c7 --- /dev/null +++ b/system_test/replication_testsuite/testcase_10132/testcase_10132_properties.json @@ -0,0 +1,107 @@ +{ + "description": {"01":"Leader Failure in Replication with multi topics & partitions : 1. 
acks => 1", + "02":"Produce and consume messages to 2 topics - 3 partitions", + "03":"This test sends messages to 2 replicas", + "04":"To trigger leader election: find the leader and terminate by controlled failure (kill -15)", + "05":"Restart the terminated broker", + "06":"Lookup brokers' log4j messages and verify that leader is re-elected successfully", + "07":"At the end it verifies the log size and contents", + "08":"Use a consumer to verify no message loss.", + "09":"Producer dimensions : mode:sync, acks:1, comp:0", + "10":"Log segment size : 512000" + }, + "testcase_args": { + "broker_type": "leader", + "auto_create_topic": "true", + "bounce_broker": "true", + "replica_factor": "2", + "num_partition": "3", + "num_iteration": "3", + "sleep_seconds_between_producer_calls": "1", + "message_producing_free_time_sec": "15" + }, + "entities": [ + { + "entity_id": "0", + "clientPort": "2188", + "dataDir": "/tmp/zookeeper_0", + "log_filename": "zookeeper_2188.log", + "config_filename": "zookeeper_2188.properties" + }, + { + "entity_id": "1", + "port": "9091", + "broker.id": "1", + "log.segment.bytes": "512000", + "log.dir": "/tmp/kafka_server_1_logs", + "default.replication.factor": "2", + "num.partitions": "3", + "log_filename": "kafka_server_9091.log", + "config_filename": "kafka_server_9091.properties" + }, + { + "entity_id": "2", + "port": "9092", + "broker.id": "2", + "log.segment.bytes": "512000", + "log.dir": "/tmp/kafka_server_2_logs", + "default.replication.factor": "2", + "num.partitions": "3", + "log_filename": "kafka_server_9092.log", + "config_filename": "kafka_server_9092.properties" + }, + { + "entity_id": "3", + "port": "9093", + "broker.id": "3", + "log.segment.bytes": "512000", + "log.dir": "/tmp/kafka_server_3_logs", + "default.replication.factor": "2", + "num.partitions": "3", + "log_filename": "kafka_server_9093.log", + "config_filename": "kafka_server_9093.properties" + }, + { + "entity_id": "4", + "topic": "test_1", + "threads": "5", + 
"compression-codec": "0", + "message-size": "500", + "message": "100", + "request-num-acks": "1", + "producer-retry-backoff-ms": "300", + "sync":"true", + "log_filename": "producer_performance_4.log", + "config_filename": "producer_performance_4.properties" + }, + { + "entity_id": "5", + "topic": "test_2", + "threads": "5", + "compression-codec": "0", + "message-size": "500", + "message": "100", + "request-num-acks": "1", + "producer-retry-backoff-ms": "300", + "sync":"true", + "log_filename": "producer_performance_5.log", + "config_filename": "producer_performance_5.properties" + }, + { + "entity_id": "6", + "topic": "test_1", + "group.id": "mytestgroup", + "consumer-timeout-ms": "10000", + "log_filename": "console_consumer_6.log", + "config_filename": "console_consumer_6.properties" + }, + { + "entity_id": "7", + "topic": "test_2", + "group.id": "mytestgroup", + "consumer-timeout-ms": "10000", + "log_filename": "console_consumer_7.log", + "config_filename": "console_consumer_7.properties" + } + ] +} diff --git a/system_test/replication_testsuite/testcase_10133/testcase_0133_properties.json b/system_test/replication_testsuite/testcase_10133/testcase_0133_properties.json deleted file mode 100644 index 8276aae..0000000 --- a/system_test/replication_testsuite/testcase_10133/testcase_0133_properties.json +++ /dev/null @@ -1,107 +0,0 @@ -{ - "description": {"01":"Leader Failure in Replication with multi topics & partitions : 1. mode => async; 2. 
comp => 0", - "02":"Produce and consume messages to 2 topics - 3 partitions", - "03":"This test sends messages to 2 replicas", - "04":"To trigger leader election: find the leader and terminate by controlled failure (kill -15)", - "05":"Restart the terminated broker", - "06":"Lookup brokers' log4j messages and verify that leader is re-elected successfully", - "07":"At the end it verifies the log size and contents", - "08":"Use a consumer to verify no message loss.", - "09":"Producer dimensions : mode:async, acks:1, comp:1", - "10":"Log segment size : 512000" - }, - "testcase_args": { - "broker_type": "leader", - "auto_create_topic": "true", - "bounce_broker": "true", - "replica_factor": "2", - "num_partition": "3", - "num_iteration": "3", - "sleep_seconds_between_producer_calls": "1", - "message_producing_free_time_sec": "15" - }, - "entities": [ - { - "entity_id": "0", - "clientPort": "2188", - "dataDir": "/tmp/zookeeper_0", - "log_filename": "zookeeper_2188.log", - "config_filename": "zookeeper_2188.properties" - }, - { - "entity_id": "1", - "port": "9091", - "broker.id": "1", - "log.segment.bytes": "512000", - "log.dir": "/tmp/kafka_server_1_logs", - "default.replication.factor": "2", - "num.partitions": "3", - "log_filename": "kafka_server_9091.log", - "config_filename": "kafka_server_9091.properties" - }, - { - "entity_id": "2", - "port": "9092", - "broker.id": "2", - "log.segment.bytes": "512000", - "log.dir": "/tmp/kafka_server_2_logs", - "default.replication.factor": "2", - "num.partitions": "3", - "log_filename": "kafka_server_9092.log", - "config_filename": "kafka_server_9092.properties" - }, - { - "entity_id": "3", - "port": "9093", - "broker.id": "3", - "log.segment.bytes": "512000", - "log.dir": "/tmp/kafka_server_3_logs", - "default.replication.factor": "2", - "num.partitions": "3", - "log_filename": "kafka_server_9093.log", - "config_filename": "kafka_server_9093.properties" - }, - { - "entity_id": "4", - "topic": "test_1", - "threads": "5", - 
"compression-codec": "1", - "message-size": "500", - "message": "100", - "request-num-acks": "1", - "producer-retry-backoff-ms": "300", - "sync":"false", - "log_filename": "producer_performance_4.log", - "config_filename": "producer_performance_4.properties" - }, - { - "entity_id": "5", - "topic": "test_2", - "threads": "5", - "compression-codec": "1", - "message-size": "500", - "message": "100", - "request-num-acks": "1", - "producer-retry-backoff-ms": "300", - "sync":"false", - "log_filename": "producer_performance_5.log", - "config_filename": "producer_performance_5.properties" - }, - { - "entity_id": "6", - "topic": "test_1", - "group.id": "mytestgroup", - "consumer-timeout-ms": "10000", - "log_filename": "console_consumer_6.log", - "config_filename": "console_consumer_6.properties" - }, - { - "entity_id": "7", - "topic": "test_2", - "group.id": "mytestgroup", - "consumer-timeout-ms": "10000", - "log_filename": "console_consumer_7.log", - "config_filename": "console_consumer_7.properties" - } - ] -} diff --git a/system_test/replication_testsuite/testcase_10133/testcase_10133_properties.json b/system_test/replication_testsuite/testcase_10133/testcase_10133_properties.json new file mode 100644 index 0000000..8276aae --- /dev/null +++ b/system_test/replication_testsuite/testcase_10133/testcase_10133_properties.json @@ -0,0 +1,107 @@ +{ + "description": {"01":"Leader Failure in Replication with multi topics & partitions : 1. mode => async; 2. 
comp => 0", + "02":"Produce and consume messages to 2 topics - 3 partitions", + "03":"This test sends messages to 2 replicas", + "04":"To trigger leader election: find the leader and terminate by controlled failure (kill -15)", + "05":"Restart the terminated broker", + "06":"Lookup brokers' log4j messages and verify that leader is re-elected successfully", + "07":"At the end it verifies the log size and contents", + "08":"Use a consumer to verify no message loss.", + "09":"Producer dimensions : mode:async, acks:1, comp:1", + "10":"Log segment size : 512000" + }, + "testcase_args": { + "broker_type": "leader", + "auto_create_topic": "true", + "bounce_broker": "true", + "replica_factor": "2", + "num_partition": "3", + "num_iteration": "3", + "sleep_seconds_between_producer_calls": "1", + "message_producing_free_time_sec": "15" + }, + "entities": [ + { + "entity_id": "0", + "clientPort": "2188", + "dataDir": "/tmp/zookeeper_0", + "log_filename": "zookeeper_2188.log", + "config_filename": "zookeeper_2188.properties" + }, + { + "entity_id": "1", + "port": "9091", + "broker.id": "1", + "log.segment.bytes": "512000", + "log.dir": "/tmp/kafka_server_1_logs", + "default.replication.factor": "2", + "num.partitions": "3", + "log_filename": "kafka_server_9091.log", + "config_filename": "kafka_server_9091.properties" + }, + { + "entity_id": "2", + "port": "9092", + "broker.id": "2", + "log.segment.bytes": "512000", + "log.dir": "/tmp/kafka_server_2_logs", + "default.replication.factor": "2", + "num.partitions": "3", + "log_filename": "kafka_server_9092.log", + "config_filename": "kafka_server_9092.properties" + }, + { + "entity_id": "3", + "port": "9093", + "broker.id": "3", + "log.segment.bytes": "512000", + "log.dir": "/tmp/kafka_server_3_logs", + "default.replication.factor": "2", + "num.partitions": "3", + "log_filename": "kafka_server_9093.log", + "config_filename": "kafka_server_9093.properties" + }, + { + "entity_id": "4", + "topic": "test_1", + "threads": "5", + 
"compression-codec": "1", + "message-size": "500", + "message": "100", + "request-num-acks": "1", + "producer-retry-backoff-ms": "300", + "sync":"false", + "log_filename": "producer_performance_4.log", + "config_filename": "producer_performance_4.properties" + }, + { + "entity_id": "5", + "topic": "test_2", + "threads": "5", + "compression-codec": "1", + "message-size": "500", + "message": "100", + "request-num-acks": "1", + "producer-retry-backoff-ms": "300", + "sync":"false", + "log_filename": "producer_performance_5.log", + "config_filename": "producer_performance_5.properties" + }, + { + "entity_id": "6", + "topic": "test_1", + "group.id": "mytestgroup", + "consumer-timeout-ms": "10000", + "log_filename": "console_consumer_6.log", + "config_filename": "console_consumer_6.properties" + }, + { + "entity_id": "7", + "topic": "test_2", + "group.id": "mytestgroup", + "consumer-timeout-ms": "10000", + "log_filename": "console_consumer_7.log", + "config_filename": "console_consumer_7.properties" + } + ] +} diff --git a/system_test/replication_testsuite/testcase_10134/testcase_0134_properties.json b/system_test/replication_testsuite/testcase_10134/testcase_0134_properties.json deleted file mode 100644 index 73bb859..0000000 --- a/system_test/replication_testsuite/testcase_10134/testcase_0134_properties.json +++ /dev/null @@ -1,92 +0,0 @@ -{ - "description": {"01":"Leader Failure in Replication with multi topics & partitions : 1. 
auto_create_topic => true", - "02":"Produce and consume messages to 2 topics - 3 partitions", - "03":"This test sends messages to 2 replicas", - "04":"To trigger leader election: find the leader and terminate by controlled failure (kill -15)", - "05":"Restart the terminated broker", - "06":"Lookup brokers' log4j messages and verify that leader is re-elected successfully", - "07":"At the end it verifies the log size and contents", - "08":"Use a consumer to verify no message loss.", - "09":"Producer dimensions : mode:sync, acks:-1, comp:0", - "10":"Log segment size : 102400" - }, - "testcase_args": { - "broker_type": "leader", - "bounce_broker": "true", - "replica_factor": "2", - "num_partition": "3", - "num_iteration": "3", - "auto_create_topic": "true", - "producer_multi_topics_mode": "true", - "consumer_multi_topics_mode": "true", - "sleep_seconds_between_producer_calls": "1", - "message_producing_free_time_sec": "15" - }, - "entities": [ - { - "entity_id": "0", - "clientPort": "2188", - "dataDir": "/tmp/zookeeper_0", - "log_filename": "zookeeper_2188.log", - "config_filename": "zookeeper_2188.properties" - }, - { - "entity_id": "1", - "port": "9091", - "broker.id": "1", - "num.partitions": "3", - "default.replication.factor": "2", - "log.segment.bytes": "102400", - "log.dir": "/tmp/kafka_server_1_logs", - "log.index.interval.bytes": "10", - "log_filename": "kafka_server_9091.log", - "config_filename": "kafka_server_9091.properties" - }, - { - "entity_id": "2", - "port": "9092", - "broker.id": "2", - "num.partitions": "3", - "default.replication.factor": "2", - "log.segment.bytes": "102400", - "log.dir": "/tmp/kafka_server_2_logs", - "log.index.interval.bytes": "10", - "log_filename": "kafka_server_9092.log", - "config_filename": "kafka_server_9092.properties" - }, - { - "entity_id": "3", - "port": "9093", - "broker.id": "3", - "num.partitions": "3", - "default.replication.factor": "2", - "log.segment.bytes": "102400", - "log.dir": "/tmp/kafka_server_3_logs", - 
"log.index.interval.bytes": "10", - "log_filename": "kafka_server_9093.log", - "config_filename": "kafka_server_9093.properties" - }, - { - "entity_id": "4", - "topic": "test_1,test_2", - "threads": "5", - "compression-codec": "1", - "message-size": "500", - "message": "100", - "request-num-acks": "-1", - "producer-retry-backoff-ms": "3500", - "producer-num-retries": "3", - "sync":"false", - "log_filename": "producer_performance_4.log", - "config_filename": "producer_performance_4.properties" - }, - { - "entity_id": "5", - "topic": "test_1,test_2", - "groupid": "mytestgroup", - "consumer-timeout-ms": "10000", - "log_filename": "console_consumer_5.log", - "config_filename": "console_consumer_5.properties" - } - ] -} diff --git a/system_test/replication_testsuite/testcase_10134/testcase_10134_properties.json b/system_test/replication_testsuite/testcase_10134/testcase_10134_properties.json new file mode 100644 index 0000000..73bb859 --- /dev/null +++ b/system_test/replication_testsuite/testcase_10134/testcase_10134_properties.json @@ -0,0 +1,92 @@ +{ + "description": {"01":"Leader Failure in Replication with multi topics & partitions : 1. 
auto_create_topic => true", + "02":"Produce and consume messages to 2 topics - 3 partitions", + "03":"This test sends messages to 2 replicas", + "04":"To trigger leader election: find the leader and terminate by controlled failure (kill -15)", + "05":"Restart the terminated broker", + "06":"Lookup brokers' log4j messages and verify that leader is re-elected successfully", + "07":"At the end it verifies the log size and contents", + "08":"Use a consumer to verify no message loss.", + "09":"Producer dimensions : mode:sync, acks:-1, comp:0", + "10":"Log segment size : 102400" + }, + "testcase_args": { + "broker_type": "leader", + "bounce_broker": "true", + "replica_factor": "2", + "num_partition": "3", + "num_iteration": "3", + "auto_create_topic": "true", + "producer_multi_topics_mode": "true", + "consumer_multi_topics_mode": "true", + "sleep_seconds_between_producer_calls": "1", + "message_producing_free_time_sec": "15" + }, + "entities": [ + { + "entity_id": "0", + "clientPort": "2188", + "dataDir": "/tmp/zookeeper_0", + "log_filename": "zookeeper_2188.log", + "config_filename": "zookeeper_2188.properties" + }, + { + "entity_id": "1", + "port": "9091", + "broker.id": "1", + "num.partitions": "3", + "default.replication.factor": "2", + "log.segment.bytes": "102400", + "log.dir": "/tmp/kafka_server_1_logs", + "log.index.interval.bytes": "10", + "log_filename": "kafka_server_9091.log", + "config_filename": "kafka_server_9091.properties" + }, + { + "entity_id": "2", + "port": "9092", + "broker.id": "2", + "num.partitions": "3", + "default.replication.factor": "2", + "log.segment.bytes": "102400", + "log.dir": "/tmp/kafka_server_2_logs", + "log.index.interval.bytes": "10", + "log_filename": "kafka_server_9092.log", + "config_filename": "kafka_server_9092.properties" + }, + { + "entity_id": "3", + "port": "9093", + "broker.id": "3", + "num.partitions": "3", + "default.replication.factor": "2", + "log.segment.bytes": "102400", + "log.dir": "/tmp/kafka_server_3_logs", + 
"log.index.interval.bytes": "10", + "log_filename": "kafka_server_9093.log", + "config_filename": "kafka_server_9093.properties" + }, + { + "entity_id": "4", + "topic": "test_1,test_2", + "threads": "5", + "compression-codec": "1", + "message-size": "500", + "message": "100", + "request-num-acks": "-1", + "producer-retry-backoff-ms": "3500", + "producer-num-retries": "3", + "sync":"false", + "log_filename": "producer_performance_4.log", + "config_filename": "producer_performance_4.properties" + }, + { + "entity_id": "5", + "topic": "test_1,test_2", + "groupid": "mytestgroup", + "consumer-timeout-ms": "10000", + "log_filename": "console_consumer_5.log", + "config_filename": "console_consumer_5.properties" + } + ] +} diff --git a/system_test/utils/metrics.py b/system_test/utils/metrics.py index d98d3cd..3e66348 100644 --- a/system_test/utils/metrics.py +++ b/system_test/utils/metrics.py @@ -106,6 +106,8 @@ def ensure_valid_headers(headers, attributes): " headers: {0}".format(",".join(headers))) def plot_graphs(inputCsvFiles, labels, title, xLabel, yLabel, attribute, outputGraphFile): + if not inputCsvFiles: return + # create empty plot fig=plt.figure() fig.subplots_adjust(bottom=0.2) -- 2.1.2