Index: system_test/log_retention_testsuite/cluster_config.json =================================================================== --- system_test/log_retention_testsuite/cluster_config.json (revision 0) +++ system_test/log_retention_testsuite/cluster_config.json (revision 0) @@ -0,0 +1,76 @@ +{ + "cluster_config": [ + { + "entity_id": "0", + "hostname": "localhost", + "role": "zookeeper", + "cluster_name": "source", + "kafka_home": "default", + "java_home": "default", + "jmx_port": "9990" + }, + { + "entity_id": "1", + "hostname": "localhost", + "role": "broker", + "cluster_name": "source", + "kafka_home": "default", + "java_home": "default", + "jmx_port": "9991" + }, + { + "entity_id": "2", + "hostname": "localhost", + "role": "broker", + "cluster_name": "source", + "kafka_home": "default", + "java_home": "default", + "jmx_port": "9992" + }, + { + "entity_id": "3", + "hostname": "localhost", + "role": "broker", + "cluster_name": "source", + "kafka_home": "default", + "java_home": "default", + "jmx_port": "9993" + }, + { + "entity_id": "4", + "hostname": "localhost", + "role": "producer_performance", + "cluster_name": "source", + "kafka_home": "default", + "java_home": "default", + "jmx_port": "9997" + }, + { + "entity_id": "5", + "hostname": "localhost", + "role": "producer_performance", + "cluster_name": "source", + "kafka_home": "default", + "java_home": "default", + "jmx_port": "9998" + }, + { + "entity_id": "6", + "hostname": "localhost", + "role": "console_consumer", + "cluster_name": "source", + "kafka_home": "default", + "java_home": "default", + "jmx_port": "9999" + }, + { + "entity_id": "7", + "hostname": "localhost", + "role": "console_consumer", + "cluster_name": "source", + "kafka_home": "default", + "java_home": "default", + "jmx_port": "9191" + } + ] +} Index: system_test/log_retention_testsuite/config/server.properties =================================================================== --- system_test/log_retention_testsuite/config/server.properties (revision 0) +++ system_test/log_retention_testsuite/config/server.properties (revision 0) @@ -0,0 +1,139 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# see kafka.server.KafkaConfig for additional details and defaults + +############################# Server Basics ############################# + +# The id of the broker. This must be set to a unique integer for each broker. +brokerid=0 + +# Hostname the broker will advertise to consumers. If not set, kafka will use the value returned +# from InetAddress.getLocalHost(). If there are multiple interfaces getLocalHost +# may not be what you want. 
+#hostname= + + +############################# Socket Server Settings ############################# + +# The port the socket server listens on +port=9091 + +# The number of threads handling network requests +network.threads=2 + +# The number of threads doing disk I/O +io.threads=2 + +# The send buffer (SO_SNDBUF) used by the socket server +socket.send.buffer=1048576 + +# The receive buffer (SO_RCVBUF) used by the socket server +socket.receive.buffer=1048576 + +# The maximum size of a request that the socket server will accept (protection against OOM) +max.socket.request.bytes=104857600 + + +############################# Log Basics ############################# + +# The directory under which to store log files +log.dir=/tmp/kafka_server_logs + +# The number of logical partitions per topic per server. More partitions allow greater parallelism +# for consumption, but also mean more files. +num.partitions=5 + +# Overrides for the default given by num.partitions on a per-topic basis +#topic.partition.count.map=topic1:3, topic2:4 + +############################# Log Flush Policy ############################# + +# The following configurations control the flush of data to disk. This is the most +# important performance knob in kafka. +# There are a few important trade-offs here: +# 1. Durability: Unflushed data is at greater risk of loss in the event of a crash. +# 2. Latency: Data is not made available to consumers until it is flushed (which adds latency). +# 3. Throughput: The flush is generally the most expensive operation. +# The settings below allow one to configure the flush policy to flush data after a period of time or +# every N messages (or both). This can be done globally and overridden on a per-topic basis. + +# The number of messages to accept before forcing a flush of data to disk +log.flush.interval=10000 + +# The maximum amount of time a message can sit in a log before we force a flush +log.default.flush.interval.ms=1000 + +# Per-topic overrides for log.default.flush.interval.ms +#topic.flush.intervals.ms=topic1:1000, topic2:3000 + +# The interval (in ms) at which logs are checked to see if they need to be flushed to disk. +log.default.flush.scheduler.interval.ms=1000 + +############################# Log Retention Policy ############################# + +# The following configurations control the disposal of log segments. The policy can +# be set to delete segments after a period of time, or after a given size has accumulated. +# A segment will be deleted whenever *either* of these criteria is met. Deletion always happens +# from the end of the log. + +# The minimum age of a log file to be eligible for deletion +log.retention.hours=168 + +# A size-based retention policy for logs. Segments are pruned from the log as long as the remaining +# segments don't drop below log.retention.size. +#log.retention.size=1073741824 +log.retention.size=-1 + +# The maximum size of a log segment file. When this size is reached a new log segment will be created. +#log.file.size=536870912 +log.file.size=102400 + +# The interval at which log segments are checked to see if they can be deleted according +# to the retention policies +log.cleanup.interval.mins=1 + +############################# Zookeeper ############################# + +# Enable connecting to zookeeper +enable.zookeeper=true + +# Zk connection string (see zk docs for details). +# This is a comma-separated list of host:port pairs, each corresponding to a zk +# server. e.g. "127.0.0.1:3000,127.0.0.1:3001,127.0.0.1:3002". 
+# You can also append an optional chroot string to the urls to specify the +# root directory for all kafka znodes. +zk.connect=localhost:2181 + +# Timeout in ms for connecting to zookeeper +zk.connectiontimeout.ms=1000000 + +monitoring.period.secs=1 +max.message.size=1000000 +max.queued.requests=500 +log.roll.hours=168 +log.index.max.size=10485760 +log.index.interval.bytes=4096 +auto.create.topics=true +controller.socket.timeout.ms=30000 +controller.message.queue.size=10 +default.replication.factor=1 +replica.max.lag.time.ms=10000 +replica.max.lag.bytes=4000 +replica.socket.timeout.ms=30000 +replica.socket.buffersize=65536 +replica.fetch.size=1048576 +replica.fetch.wait.time.ms=500 +replica.fetch.min.bytes=4096 +replica.fetchers=1 Index: system_test/log_retention_testsuite/config/zookeeper.properties =================================================================== --- system_test/log_retention_testsuite/config/zookeeper.properties (revision 0) +++ system_test/log_retention_testsuite/config/zookeeper.properties (revision 0) @@ -0,0 +1,20 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# the directory where the snapshot is stored. +dataDir=/tmp/zookeeper +# the port at which the clients will connect +clientPort=2181 +# disable the per-ip limit on the number of connections since this is a non-production config +maxClientCnxns=0 Index: system_test/log_retention_testsuite/log_retention_test.py =================================================================== --- system_test/log_retention_testsuite/log_retention_test.py (revision 0) +++ system_test/log_retention_testsuite/log_retention_test.py (revision 0) @@ -0,0 +1,353 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+#!/usr/bin/env python + +# =================================== +# log_retention_test.py +# =================================== + +import inspect +import logging +import os +import pprint +import signal +import subprocess +import sys +import time +import traceback + +from system_test_env import SystemTestEnv +sys.path.append(SystemTestEnv.SYSTEM_TEST_UTIL_DIR) + +from setup_utils import SetupUtils +from replication_utils import ReplicationUtils +import system_test_utils +from testcase_env import TestcaseEnv + +# product specific: Kafka +import kafka_system_test_utils +import metrics + +class LogRetentionTest(ReplicationUtils, SetupUtils): + + testModuleAbsPathName = os.path.realpath(__file__) + testSuiteAbsPathName = os.path.abspath(os.path.dirname(testModuleAbsPathName)) + + def __init__(self, systemTestEnv): + + # SystemTestEnv - provides cluster level environment settings + # such as entity_id, hostname, kafka_home, java_home which + # are available in a list of dictionaries named + # "clusterEntityConfigDictList" + self.systemTestEnv = systemTestEnv + + super(LogRetentionTest, self).__init__(self) + + # dict to pass user-defined attributes to logger argument: "extra" + # (assigned to self.d because later logging calls reference self.d) + self.d = {'name_of_class': self.__class__.__name__} + + def signal_handler(self, signal, frame): + self.log_message("Interrupt detected - User pressed Ctrl+c") + + # perform the necessary cleanup here when the user presses Ctrl+c; the cleanup may be product specific + self.log_message("stopping all entities - please wait ...") + kafka_system_test_utils.stop_all_remote_running_processes(self.systemTestEnv, self.testcaseEnv) + sys.exit(1) + + def runTest(self): + + # ====================================================================== + # get all testcase directories under this testsuite + # ====================================================================== + testCasePathNameList = system_test_utils.get_dir_paths_with_prefix( + self.testSuiteAbsPathName, SystemTestEnv.SYSTEM_TEST_CASE_PREFIX) + testCasePathNameList.sort() + + # ============================================================= + # launch each testcase one by one: testcase_1, testcase_2, ... 
+ # ============================================================= + for testCasePathName in testCasePathNameList: + + skipThisTestCase = False + + try: + # ====================================================================== + # A new instance of TestcaseEnv to keep track of this testcase's env vars + # and initialize some env vars as testCasePathName is available now + # ====================================================================== + self.testcaseEnv = TestcaseEnv(self.systemTestEnv, self) + self.testcaseEnv.testSuiteBaseDir = self.testSuiteAbsPathName + self.testcaseEnv.initWithKnownTestCasePathName(testCasePathName) + self.testcaseEnv.testcaseArgumentsDict = self.testcaseEnv.testcaseNonEntityDataDict["testcase_args"] + + # ====================================================================== + # SKIP if this case is IN testcase_to_skip.json or NOT IN testcase_to_run.json + # ====================================================================== + testcaseDirName = self.testcaseEnv.testcaseResultsDict["_test_case_name"] + + if self.systemTestEnv.printTestDescriptionsOnly: + self.testcaseEnv.printTestCaseDescription(testcaseDirName) + continue + elif self.systemTestEnv.isTestCaseToSkip(self.__class__.__name__, testcaseDirName): + self.log_message("Skipping : " + testcaseDirName) + skipThisTestCase = True + continue + else: + self.testcaseEnv.printTestCaseDescription(testcaseDirName) + system_test_utils.setup_remote_hosts_with_testcase_level_cluster_config(self.systemTestEnv, testCasePathName) + + + # ============================================================================== # + # ============================================================================== # + # Product Specific Testing Code Starts Here: # + # ============================================================================== # + # ============================================================================== # + + # initialize self.testcaseEnv with user-defined environment variables (product specific) + self.testcaseEnv.userDefinedEnvVarDict["zkConnectStr"] = "" + self.testcaseEnv.userDefinedEnvVarDict["stopBackgroundProducer"] = False + self.testcaseEnv.userDefinedEnvVarDict["backgroundProducerStopped"] = False + self.testcaseEnv.userDefinedEnvVarDict["leaderElectionLatencyList"] = [] + + # initialize signal handler + signal.signal(signal.SIGINT, self.signal_handler) + + # create "LOCAL" log directories for metrics, dashboards for each entity under this testcase + # for collecting logs from remote machines + kafka_system_test_utils.generate_testcase_log_dirs(self.systemTestEnv, self.testcaseEnv) + + # TestcaseEnv.testcaseConfigsList initialized by reading testcase properties file: + # system_test/_testsuite/testcase_/testcase__properties.json + self.testcaseEnv.testcaseConfigsList = system_test_utils.get_json_list_data( + self.testcaseEnv.testcasePropJsonPathName) + + # TestcaseEnv - initialize producer & consumer config / log file pathnames + kafka_system_test_utils.init_entity_props(self.systemTestEnv, self.testcaseEnv) + + + # clean up data directories specified in zookeeper.properties and kafka_server_.properties + kafka_system_test_utils.cleanup_data_at_remote_hosts(self.systemTestEnv, self.testcaseEnv) + + # generate remote hosts log/config dirs if not exist + kafka_system_test_utils.generate_testcase_log_dirs_in_remote_hosts(self.systemTestEnv, self.testcaseEnv) + + # generate properties files for zookeeper, kafka, producer, consumer: + # 1. 
copy system_test/_testsuite/config/*.properties to + # system_test/_testsuite/testcase_/config/ + # 2. update all properties files in system_test/_testsuite/testcase_/config + # by overriding the settings specified in: + # system_test/_testsuite/testcase_/testcase__properties.json + kafka_system_test_utils.generate_overriden_props_files(self.testSuiteAbsPathName, + self.testcaseEnv, self.systemTestEnv) + + # ============================================= + # preparing all entities to start the test + # ============================================= + self.log_message("starting zookeepers") + kafka_system_test_utils.start_zookeepers(self.systemTestEnv, self.testcaseEnv) + self.anonLogger.info("sleeping for 2s") + time.sleep(2) + + self.log_message("starting brokers") + kafka_system_test_utils.start_brokers(self.systemTestEnv, self.testcaseEnv) + self.anonLogger.info("sleeping for 5s") + time.sleep(5) + + self.log_message("creating topics") + kafka_system_test_utils.create_topic(self.systemTestEnv, self.testcaseEnv) + self.anonLogger.info("sleeping for 5s") + time.sleep(5) + + # ============================================= + # starting consumer + # ============================================= + self.log_message("starting consumer in the background") + kafka_system_test_utils.start_console_consumer(self.systemTestEnv, self.testcaseEnv) + time.sleep(1) + + # ============================================= + # starting producer + # ============================================= + self.log_message("starting producer in the background") + kafka_system_test_utils.start_producer_performance(self.systemTestEnv, self.testcaseEnv, False) + msgProducingFreeTimeSec = self.testcaseEnv.testcaseArgumentsDict["message_producing_free_time_sec"] + self.anonLogger.info("sleeping for " + msgProducingFreeTimeSec + " sec to produce some messages") + time.sleep(int(msgProducingFreeTimeSec)) + + # ============================================= + # A while-loop to bounce the broker as specified + # by "num_iteration" in testcase_n_properties.json + # ============================================= + i = 1 + numIterations = int(self.testcaseEnv.testcaseArgumentsDict["num_iteration"]) + brokerType = self.testcaseEnv.testcaseArgumentsDict["broker_type"] + bounceBrokerFlag = self.testcaseEnv.testcaseArgumentsDict["bounce_broker"] + while i <= numIterations: + self.log_message("Iteration " + str(i) + " of " + str(numIterations)) + self.log_message("bounce_broker flag : " + bounceBrokerFlag) + + stoppedBrokerEntityId = "" + + # ============================================= + # Find out the entity id of the broker to stop + # ============================================= + + # Leader or Follower + if brokerType == "leader" or brokerType == "follower": + self.log_message("looking up leader") + leaderDict = kafka_system_test_utils.get_leader_elected_log_line( + self.systemTestEnv, self.testcaseEnv, self.leaderAttributesDict) + + # Leader + if brokerType == "leader": + stoppedBrokerEntityId = leaderDict["entity_id"] + self.log_message("Found leader with entity id: " + stoppedBrokerEntityId) + + # Follower + else: + # a list of all brokers + brokerEntityIdList = system_test_utils.get_data_from_list_of_dicts( + self.systemTestEnv.clusterEntityConfigDictList, "role", "broker", "entity_id") + # pick as follower the first broker that is not the leader + firstFollowerEntityId = None + for brokerEntityId in brokerEntityIdList: + if brokerEntityId != leaderDict["entity_id"]: + firstFollowerEntityId = brokerEntityId + break + 
stoppedBrokerEntityId = firstFollowerEntityId + self.log_message("Found follower with entity id: " + stoppedBrokerEntityId) + + # Controller + elif brokerType == "controller": + self.log_message("looking up controller") + controllerDict = kafka_system_test_utils.get_controller_attributes(self.systemTestEnv, self.testcaseEnv) + stoppedBrokerEntityId = controllerDict["entity_id"] + self.log_message("Found controller with entity id: " + stoppedBrokerEntityId) + + # ============================================= + # Bounce the broker + # ============================================= + if bounceBrokerFlag.lower() == "true": + self.log_message("stopping broker with entity id: " + stoppedBrokerEntityId) + kafka_system_test_utils.stop_remote_entity(self.systemTestEnv, stoppedBrokerEntityId, + self.testcaseEnv.entityBrokerParentPidDict[stoppedBrokerEntityId]) + + brokerDownTimeInSec = 30 + try: + brokerDownTimeInSec = int(self.testcaseEnv.testcaseArgumentsDict["broker_down_time_in_sec"]) + except (KeyError, ValueError): + # key missing or not an integer - keep the default + pass + time.sleep(brokerDownTimeInSec) + + self.log_message("starting the previously terminated broker") + kafka_system_test_utils.start_entity_in_background(self.systemTestEnv, self.testcaseEnv, stoppedBrokerEntityId) + + + self.anonLogger.info("sleeping for 15s") + time.sleep(15) + i += 1 + # while loop + + # ============================================= + # tell producer to stop + # ============================================= + self.testcaseEnv.lock.acquire() + self.testcaseEnv.userDefinedEnvVarDict["stopBackgroundProducer"] = True + time.sleep(1) + self.testcaseEnv.lock.release() + time.sleep(1) + + # ============================================= + # wait for producer thread's update of + # "backgroundProducerStopped" to be "True" + # ============================================= + while True: + self.testcaseEnv.lock.acquire() + self.logger.info("status of backgroundProducerStopped : [" + \ + str(self.testcaseEnv.userDefinedEnvVarDict["backgroundProducerStopped"]) + "]", extra=self.d) + if self.testcaseEnv.userDefinedEnvVarDict["backgroundProducerStopped"]: + time.sleep(1) + self.logger.info("all producer threads completed", extra=self.d) + # release the lock before leaving the loop so other threads are not blocked + self.testcaseEnv.lock.release() + break + time.sleep(1) + self.testcaseEnv.lock.release() + time.sleep(2) + + # ============================================= + # collect logs from remote hosts + # ============================================= + kafka_system_test_utils.collect_logs_from_remote_hosts(self.systemTestEnv, self.testcaseEnv) + + minStartingOffsetDict = kafka_system_test_utils.getMinCommonStartingOffset(self.systemTestEnv, self.testcaseEnv) + print + pprint.pprint(minStartingOffsetDict) + + # ============================================= + # starting consumer + # ============================================= + self.log_message("starting debug consumers in the background") + kafka_system_test_utils.start_simple_consumer(self.systemTestEnv, self.testcaseEnv, minStartingOffsetDict) + self.anonLogger.info("sleeping for 10s") + time.sleep(10) + + # ============================================= + # this testcase is completed - stop all entities + # ============================================= + self.log_message("stopping all entities") + for entityId, parentPid in self.testcaseEnv.entityBrokerParentPidDict.items(): + kafka_system_test_utils.stop_remote_entity(self.systemTestEnv, entityId, parentPid) + + for entityId, parentPid in self.testcaseEnv.entityZkParentPidDict.items(): + kafka_system_test_utils.stop_remote_entity(self.systemTestEnv, entityId, parentPid) + + # make sure 
all entities are stopped + kafka_system_test_utils.ps_grep_terminate_running_entity(self.systemTestEnv) + + # ============================================= + # collect logs from remote hosts + # ============================================= + kafka_system_test_utils.collect_logs_from_remote_hosts(self.systemTestEnv, self.testcaseEnv) + + # ============================================= + # validate the data matched and checksum + # ============================================= + self.log_message("validating data matched") + kafka_system_test_utils.validate_simple_consumer_data_matched_across_replicas(self.systemTestEnv, self.testcaseEnv) + kafka_system_test_utils.validate_data_matched(self.systemTestEnv, self.testcaseEnv) + + # ============================================= + # draw graphs + # ============================================= + metrics.draw_all_graphs(self.systemTestEnv.METRICS_PATHNAME, + self.testcaseEnv, + self.systemTestEnv.clusterEntityConfigDictList) + + # build dashboard, one for each role + metrics.build_all_dashboards(self.systemTestEnv.METRICS_PATHNAME, + self.testcaseEnv.testCaseDashboardsDir, + self.systemTestEnv.clusterEntityConfigDictList) + + except Exception as e: + self.log_message("Exception while running test {0}".format(e)) + traceback.print_exc() + + finally: + if not skipThisTestCase and not self.systemTestEnv.printTestDescriptionsOnly: + self.log_message("stopping all entities - please wait ...") + kafka_system_test_utils.stop_all_remote_running_processes(self.systemTestEnv, self.testcaseEnv) + Index: system_test/log_retention_testsuite/testcase_4001/testcase_4001_properties.json =================================================================== --- system_test/log_retention_testsuite/testcase_4001/testcase_4001_properties.json (revision 0) +++ system_test/log_retention_testsuite/testcase_4001/testcase_4001_properties.json (revision 0) @@ -0,0 +1,102 @@ +{ + "description": {"01":"Broker Log Retention : Base Test", + "02":"Produce and consume messages to 2 topics - 2 partitions", + "03":"This test sends messages to 3 replicas", + "04":"To trigger leader election: find the leader and terminate by controlled failure (kill -15)", + "05":"Restart the terminated broker", + "06":"Lookup brokers' log4j messages and verify that leader is re-elected successfully", + "07":"At the end it verifies the log size and contents", + "08":"Use a consumer to verify no message loss.", + "09":"Producer dimensions : mode:sync, acks:-1, comp:0", + "10":"Log segment size : 102400" + }, + "testcase_args": { + "broker_type": "leader", + "bounce_broker": "true", + "replica_factor": "3", + "num_partition": "2", + "num_iteration": "1", + "sleep_seconds_between_producer_calls": "1", + "broker_down_time_in_sec": "5", + "message_producing_free_time_sec": "15" + }, + "entities": [ + { + "entity_id": "0", + "clientPort": "2188", + "dataDir": "/tmp/zookeeper_0", + "log_filename": "zookeeper_2188.log", + "config_filename": "zookeeper_2188.properties" + }, + { + "entity_id": "1", + "port": "9091", + "brokerid": "1", + "log.file.size": "102400", + "log.retention.size": "1048576", + "log.dir": "/tmp/kafka_server_1_logs", + "log_filename": "kafka_server_9091.log", + "config_filename": "kafka_server_9091.properties" + }, + { + "entity_id": "2", + "port": "9092", + "brokerid": "2", + "log.file.size": "102400", + "log.retention.size": "1048576", + "log.dir": "/tmp/kafka_server_2_logs", + "log_filename": "kafka_server_9092.log", + "config_filename": "kafka_server_9092.properties" + }, + { + 
"entity_id": "3", + "port": "9093", + "brokerid": "3", + "log.file.size": "102400", + "log.retention.size": "1048576", + "log.dir": "/tmp/kafka_server_3_logs", + "log_filename": "kafka_server_9093.log", + "config_filename": "kafka_server_9093.properties" + }, + { + "entity_id": "4", + "topic": "test_1", + "threads": "5", + "compression-codec": "0", + "message-size": "500", + "message": "500", + "request-num-acks": "-1", + "sync":"true", + "log_filename": "producer_performance_4.log", + "config_filename": "producer_performance_4.properties" + }, + { + "entity_id": "5", + "topic": "test_2", + "threads": "5", + "compression-codec": "0", + "message-size": "500", + "message": "500", + "request-num-acks": "-1", + "sync":"true", + "log_filename": "producer_performance_5.log", + "config_filename": "producer_performance_5.properties" + }, + { + "entity_id": "6", + "topic": "test_1", + "groupid": "mytestgroup", + "consumer-timeout-ms": "60000", + "log_filename": "console_consumer_6.log", + "config_filename": "console_consumer_6.properties" + }, + { + "entity_id": "7", + "topic": "test_2", + "groupid": "mytestgroup", + "consumer-timeout-ms": "60000", + "log_filename": "console_consumer_7.log", + "config_filename": "console_consumer_7.properties" + } + ] +} Index: system_test/log_retention_testsuite/testcase_4002/testcase_4002_properties.json =================================================================== --- system_test/log_retention_testsuite/testcase_4002/testcase_4002_properties.json (revision 0) +++ system_test/log_retention_testsuite/testcase_4002/testcase_4002_properties.json (revision 0) @@ -0,0 +1,102 @@ +{ + "description": {"01":"Broker Log Retention : 1. acks => 1", + "02":"Produce and consume messages to 2 topics - 2 partitions", + "03":"This test sends messages to 3 replicas", + "04":"To trigger leader election: find the leader and terminate by controlled failure (kill -15)", + "05":"Restart the terminated broker", + "06":"Lookup brokers' log4j messages and verify that leader is re-elected successfully", + "07":"At the end it verifies the log size and contents", + "08":"Use a consumer to verify no message loss.", + "09":"Producer dimensions : mode:sync, acks:1, comp:0", + "10":"Log segment size : 102400" + }, + "testcase_args": { + "broker_type": "leader", + "bounce_broker": "true", + "replica_factor": "3", + "num_partition": "2", + "num_iteration": "1", + "sleep_seconds_between_producer_calls": "1", + "broker_down_time_in_sec": "5", + "message_producing_free_time_sec": "15" + }, + "entities": [ + { + "entity_id": "0", + "clientPort": "2188", + "dataDir": "/tmp/zookeeper_0", + "log_filename": "zookeeper_2188.log", + "config_filename": "zookeeper_2188.properties" + }, + { + "entity_id": "1", + "port": "9091", + "brokerid": "1", + "log.file.size": "102400", + "log.retention.size": "1048576", + "log.dir": "/tmp/kafka_server_1_logs", + "log_filename": "kafka_server_9091.log", + "config_filename": "kafka_server_9091.properties" + }, + { + "entity_id": "2", + "port": "9092", + "brokerid": "2", + "log.file.size": "102400", + "log.retention.size": "1048576", + "log.dir": "/tmp/kafka_server_2_logs", + "log_filename": "kafka_server_9092.log", + "config_filename": "kafka_server_9092.properties" + }, + { + "entity_id": "3", + "port": "9093", + "brokerid": "3", + "log.file.size": "102400", + "log.retention.size": "1048576", + "log.dir": "/tmp/kafka_server_3_logs", + "log_filename": "kafka_server_9093.log", + "config_filename": "kafka_server_9093.properties" + }, + { + "entity_id": "4", + "topic": 
"test_1", + "threads": "5", + "compression-codec": "0", + "message-size": "500", + "message": "500", + "request-num-acks": "1", + "sync":"true", + "log_filename": "producer_performance_4.log", + "config_filename": "producer_performance_4.properties" + }, + { + "entity_id": "5", + "topic": "test_2", + "threads": "5", + "compression-codec": "0", + "message-size": "500", + "message": "500", + "request-num-acks": "1", + "sync":"true", + "log_filename": "producer_performance_5.log", + "config_filename": "producer_performance_5.properties" + }, + { + "entity_id": "6", + "topic": "test_1", + "groupid": "mytestgroup", + "consumer-timeout-ms": "60000", + "log_filename": "console_consumer_6.log", + "config_filename": "console_consumer_6.properties" + }, + { + "entity_id": "7", + "topic": "test_2", + "groupid": "mytestgroup", + "consumer-timeout-ms": "60000", + "log_filename": "console_consumer_7.log", + "config_filename": "console_consumer_7.properties" + } + ] +} Index: system_test/log_retention_testsuite/testcase_4003/testcase_4003_properties.json =================================================================== --- system_test/log_retention_testsuite/testcase_4003/testcase_4003_properties.json (revision 0) +++ system_test/log_retention_testsuite/testcase_4003/testcase_4003_properties.json (revision 0) @@ -0,0 +1,102 @@ +{ + "description": {"01":"Broker Log Retention : 1. acks => -1, comp => 1", + "02":"Produce and consume messages to 2 topics - 2 partitions", + "03":"This test sends messages to 3 replicas", + "04":"To trigger leader election: find the leader and terminate by controlled failure (kill -15)", + "05":"Restart the terminated broker", + "06":"Lookup brokers' log4j messages and verify that leader is re-elected successfully", + "07":"At the end it verifies the log size and contents", + "08":"Use a consumer to verify no message loss.", + "09":"Producer dimensions : mode:sync, acks:-1, comp:1", + "10":"Log segment size : 102400" + }, + "testcase_args": { + "broker_type": "leader", + "bounce_broker": "true", + "replica_factor": "3", + "num_partition": "2", + "num_iteration": "1", + "sleep_seconds_between_producer_calls": "1", + "broker_down_time_in_sec": "5", + "message_producing_free_time_sec": "15" + }, + "entities": [ + { + "entity_id": "0", + "clientPort": "2188", + "dataDir": "/tmp/zookeeper_0", + "log_filename": "zookeeper_2188.log", + "config_filename": "zookeeper_2188.properties" + }, + { + "entity_id": "1", + "port": "9091", + "brokerid": "1", + "log.file.size": "102400", + "log.retention.size": "1048576", + "log.dir": "/tmp/kafka_server_1_logs", + "log_filename": "kafka_server_9091.log", + "config_filename": "kafka_server_9091.properties" + }, + { + "entity_id": "2", + "port": "9092", + "brokerid": "2", + "log.file.size": "102400", + "log.retention.size": "1048576", + "log.dir": "/tmp/kafka_server_2_logs", + "log_filename": "kafka_server_9092.log", + "config_filename": "kafka_server_9092.properties" + }, + { + "entity_id": "3", + "port": "9093", + "brokerid": "3", + "log.file.size": "102400", + "log.retention.size": "1048576", + "log.dir": "/tmp/kafka_server_3_logs", + "log_filename": "kafka_server_9093.log", + "config_filename": "kafka_server_9093.properties" + }, + { + "entity_id": "4", + "topic": "test_1", + "threads": "5", + "compression-codec": "1", + "message-size": "500", + "message": "500", + "request-num-acks": "-1", + "sync":"true", + "log_filename": "producer_performance_4.log", + "config_filename": "producer_performance_4.properties" + }, + { + "entity_id": "5", + 
"topic": "test_2", + "threads": "5", + "compression-codec": "1", + "message-size": "500", + "message": "500", + "request-num-acks": "-1", + "sync":"true", + "log_filename": "producer_performance_5.log", + "config_filename": "producer_performance_5.properties" + }, + { + "entity_id": "6", + "topic": "test_1", + "groupid": "mytestgroup", + "consumer-timeout-ms": "60000", + "log_filename": "console_consumer_6.log", + "config_filename": "console_consumer_6.properties" + }, + { + "entity_id": "7", + "topic": "test_2", + "groupid": "mytestgroup", + "consumer-timeout-ms": "60000", + "log_filename": "console_consumer_7.log", + "config_filename": "console_consumer_7.properties" + } + ] +} Index: system_test/log_retention_testsuite/testcase_4004/testcase_4004_properties.json =================================================================== --- system_test/log_retention_testsuite/testcase_4004/testcase_4004_properties.json (revision 0) +++ system_test/log_retention_testsuite/testcase_4004/testcase_4004_properties.json (revision 0) @@ -0,0 +1,102 @@ +{ + "description": {"01":"Broker Log Retention : 1. acks => 1", + "02":"Produce and consume messages to 2 topics - 2 partitions", + "03":"This test sends messages to 3 replicas", + "04":"To trigger leader election: find the leader and terminate by controlled failure (kill -15)", + "05":"Restart the terminated broker", + "06":"Lookup brokers' log4j messages and verify that leader is re-elected successfully", + "07":"At the end it verifies the log size and contents", + "08":"Use a consumer to verify no message loss.", + "09":"Producer dimensions : mode:sync, acks:1, comp:1", + "10":"Log segment size : 102400" + }, + "testcase_args": { + "broker_type": "leader", + "bounce_broker": "true", + "replica_factor": "3", + "num_partition": "2", + "num_iteration": "1", + "sleep_seconds_between_producer_calls": "1", + "broker_down_time_in_sec": "5", + "message_producing_free_time_sec": "15" + }, + "entities": [ + { + "entity_id": "0", + "clientPort": "2188", + "dataDir": "/tmp/zookeeper_0", + "log_filename": "zookeeper_2188.log", + "config_filename": "zookeeper_2188.properties" + }, + { + "entity_id": "1", + "port": "9091", + "brokerid": "1", + "log.file.size": "102400", + "log.retention.size": "1048576", + "log.dir": "/tmp/kafka_server_1_logs", + "log_filename": "kafka_server_9091.log", + "config_filename": "kafka_server_9091.properties" + }, + { + "entity_id": "2", + "port": "9092", + "brokerid": "2", + "log.file.size": "102400", + "log.retention.size": "1048576", + "log.dir": "/tmp/kafka_server_2_logs", + "log_filename": "kafka_server_9092.log", + "config_filename": "kafka_server_9092.properties" + }, + { + "entity_id": "3", + "port": "9093", + "brokerid": "3", + "log.file.size": "102400", + "log.retention.size": "1048576", + "log.dir": "/tmp/kafka_server_3_logs", + "log_filename": "kafka_server_9093.log", + "config_filename": "kafka_server_9093.properties" + }, + { + "entity_id": "4", + "topic": "test_1", + "threads": "5", + "compression-codec": "1", + "message-size": "500", + "message": "500", + "request-num-acks": "1", + "sync":"true", + "log_filename": "producer_performance_4.log", + "config_filename": "producer_performance_4.properties" + }, + { + "entity_id": "5", + "topic": "test_2", + "threads": "5", + "compression-codec": "1", + "message-size": "500", + "message": "500", + "request-num-acks": "1", + "sync":"true", + "log_filename": "producer_performance_5.log", + "config_filename": "producer_performance_5.properties" + }, + { + "entity_id": "6", + 
"topic": "test_1", + "groupid": "mytestgroup", + "consumer-timeout-ms": "60000", + "log_filename": "console_consumer_6.log", + "config_filename": "console_consumer_6.properties" + }, + { + "entity_id": "7", + "topic": "test_2", + "groupid": "mytestgroup", + "consumer-timeout-ms": "60000", + "log_filename": "console_consumer_7.log", + "config_filename": "console_consumer_7.properties" + } + ] +} Index: system_test/log_retention_testsuite/testcase_4005/testcase_4005_properties.json =================================================================== --- system_test/log_retention_testsuite/testcase_4005/testcase_4005_properties.json (revision 0) +++ system_test/log_retention_testsuite/testcase_4005/testcase_4005_properties.json (revision 0) @@ -0,0 +1,102 @@ +{ + "description": {"01":"Broker Log Retention : 1. sync => false, acks => -1, comp => 0", + "02":"Produce and consume messages to 2 topics - 2 partitions", + "03":"This test sends messages to 3 replicas", + "04":"To trigger leader election: find the leader and terminate by controlled failure (kill -15)", + "05":"Restart the terminated broker", + "06":"Lookup brokers' log4j messages and verify that leader is re-elected successfully", + "07":"At the end it verifies the log size and contents", + "08":"Use a consumer to verify no message loss.", + "09":"Producer dimensions : mode:async, acks:-1, comp:0", + "10":"Log segment size : 102400" + }, + "testcase_args": { + "broker_type": "leader", + "bounce_broker": "true", + "replica_factor": "3", + "num_partition": "2", + "num_iteration": "1", + "sleep_seconds_between_producer_calls": "1", + "broker_down_time_in_sec": "5", + "message_producing_free_time_sec": "15" + }, + "entities": [ + { + "entity_id": "0", + "clientPort": "2188", + "dataDir": "/tmp/zookeeper_0", + "log_filename": "zookeeper_2188.log", + "config_filename": "zookeeper_2188.properties" + }, + { + "entity_id": "1", + "port": "9091", + "brokerid": "1", + "log.file.size": "102400", + "log.retention.size": "1048576", + "log.dir": "/tmp/kafka_server_1_logs", + "log_filename": "kafka_server_9091.log", + "config_filename": "kafka_server_9091.properties" + }, + { + "entity_id": "2", + "port": "9092", + "brokerid": "2", + "log.file.size": "102400", + "log.retention.size": "1048576", + "log.dir": "/tmp/kafka_server_2_logs", + "log_filename": "kafka_server_9092.log", + "config_filename": "kafka_server_9092.properties" + }, + { + "entity_id": "3", + "port": "9093", + "brokerid": "3", + "log.file.size": "102400", + "log.retention.size": "1048576", + "log.dir": "/tmp/kafka_server_3_logs", + "log_filename": "kafka_server_9093.log", + "config_filename": "kafka_server_9093.properties" + }, + { + "entity_id": "4", + "topic": "test_1", + "threads": "5", + "compression-codec": "0", + "message-size": "500", + "message": "500", + "request-num-acks": "-1", + "sync":"false", + "log_filename": "producer_performance_4.log", + "config_filename": "producer_performance_4.properties" + }, + { + "entity_id": "5", + "topic": "test_2", + "threads": "5", + "compression-codec": "0", + "message-size": "500", + "message": "500", + "request-num-acks": "-1", + "sync":"false", + "log_filename": "producer_performance_5.log", + "config_filename": "producer_performance_5.properties" + }, + { + "entity_id": "6", + "topic": "test_1", + "groupid": "mytestgroup", + "consumer-timeout-ms": "60000", + "log_filename": "console_consumer_6.log", + "config_filename": "console_consumer_6.properties" + }, + { + "entity_id": "7", + "topic": "test_2", + "groupid": "mytestgroup", + 
"consumer-timeout-ms": "60000", + "log_filename": "console_consumer_7.log", + "config_filename": "console_consumer_7.properties" + } + ] +} Index: system_test/log_retention_testsuite/testcase_4006/testcase_4006_properties.json =================================================================== --- system_test/log_retention_testsuite/testcase_4006/testcase_4006_properties.json (revision 0) +++ system_test/log_retention_testsuite/testcase_4006/testcase_4006_properties.json (revision 0) @@ -0,0 +1,102 @@ +{ + "description": {"01":"Broker Log Retention : 1. acks => 1", + "02":"Produce and consume messages to 2 topics - 2 partitions", + "03":"This test sends messages to 3 replicas", + "04":"To trigger leader election: find the leader and terminate by controlled failure (kill -15)", + "05":"Restart the terminated broker", + "06":"Lookup brokers' log4j messages and verify that leader is re-elected successfully", + "07":"At the end it verifies the log size and contents", + "08":"Use a consumer to verify no message loss.", + "09":"Producer dimensions : mode:async, acks:1, comp:0", + "10":"Log segment size : 102400" + }, + "testcase_args": { + "broker_type": "leader", + "bounce_broker": "true", + "replica_factor": "3", + "num_partition": "2", + "num_iteration": "1", + "sleep_seconds_between_producer_calls": "1", + "broker_down_time_in_sec": "5", + "message_producing_free_time_sec": "15" + }, + "entities": [ + { + "entity_id": "0", + "clientPort": "2188", + "dataDir": "/tmp/zookeeper_0", + "log_filename": "zookeeper_2188.log", + "config_filename": "zookeeper_2188.properties" + }, + { + "entity_id": "1", + "port": "9091", + "brokerid": "1", + "log.file.size": "102400", + "log.retention.size": "1048576", + "log.dir": "/tmp/kafka_server_1_logs", + "log_filename": "kafka_server_9091.log", + "config_filename": "kafka_server_9091.properties" + }, + { + "entity_id": "2", + "port": "9092", + "brokerid": "2", + "log.file.size": "102400", + "log.retention.size": "1048576", + "log.dir": "/tmp/kafka_server_2_logs", + "log_filename": "kafka_server_9092.log", + "config_filename": "kafka_server_9092.properties" + }, + { + "entity_id": "3", + "port": "9093", + "brokerid": "3", + "log.file.size": "102400", + "log.retention.size": "1048576", + "log.dir": "/tmp/kafka_server_3_logs", + "log_filename": "kafka_server_9093.log", + "config_filename": "kafka_server_9093.properties" + }, + { + "entity_id": "4", + "topic": "test_1", + "threads": "5", + "compression-codec": "0", + "message-size": "500", + "message": "500", + "request-num-acks": "1", + "sync":"false", + "log_filename": "producer_performance_4.log", + "config_filename": "producer_performance_4.properties" + }, + { + "entity_id": "5", + "topic": "test_2", + "threads": "5", + "compression-codec": "0", + "message-size": "500", + "message": "500", + "request-num-acks": "1", + "sync":"false", + "log_filename": "producer_performance_5.log", + "config_filename": "producer_performance_5.properties" + }, + { + "entity_id": "6", + "topic": "test_1", + "groupid": "mytestgroup", + "consumer-timeout-ms": "60000", + "log_filename": "console_consumer_6.log", + "config_filename": "console_consumer_6.properties" + }, + { + "entity_id": "7", + "topic": "test_2", + "groupid": "mytestgroup", + "consumer-timeout-ms": "60000", + "log_filename": "console_consumer_7.log", + "config_filename": "console_consumer_7.properties" + } + ] +} Index: system_test/log_retention_testsuite/testcase_4007/testcase_4007_properties.json =================================================================== 
--- system_test/log_retention_testsuite/testcase_4007/testcase_4007_properties.json (revision 0) +++ system_test/log_retention_testsuite/testcase_4007/testcase_4007_properties.json (revision 0) @@ -0,0 +1,102 @@ +{ + "description": {"01":"Broker Log Retention : 1. acks => -1, 2. comp => 1", + "02":"Produce and consume messages to 2 topics - 2 partitions", + "03":"This test sends messages to 3 replicas", + "04":"To trigger leader election: find the leader and terminate by controlled failure (kill -15)", + "05":"Restart the terminated broker", + "06":"Lookup brokers' log4j messages and verify that leader is re-elected successfully", + "07":"At the end it verifies the log size and contents", + "08":"Use a consumer to verify no message loss.", + "09":"Producer dimensions : mode:async, acks:-1, comp:1", + "10":"Log segment size : 102400" + }, + "testcase_args": { + "broker_type": "leader", + "bounce_broker": "true", + "replica_factor": "3", + "num_partition": "2", + "num_iteration": "1", + "sleep_seconds_between_producer_calls": "1", + "broker_down_time_in_sec": "5", + "message_producing_free_time_sec": "15" + }, + "entities": [ + { + "entity_id": "0", + "clientPort": "2188", + "dataDir": "/tmp/zookeeper_0", + "log_filename": "zookeeper_2188.log", + "config_filename": "zookeeper_2188.properties" + }, + { + "entity_id": "1", + "port": "9091", + "brokerid": "1", + "log.file.size": "102400", + "log.retention.size": "1048576", + "log.dir": "/tmp/kafka_server_1_logs", + "log_filename": "kafka_server_9091.log", + "config_filename": "kafka_server_9091.properties" + }, + { + "entity_id": "2", + "port": "9092", + "brokerid": "2", + "log.file.size": "102400", + "log.retention.size": "1048576", + "log.dir": "/tmp/kafka_server_2_logs", + "log_filename": "kafka_server_9092.log", + "config_filename": "kafka_server_9092.properties" + }, + { + "entity_id": "3", + "port": "9093", + "brokerid": "3", + "log.file.size": "102400", + "log.retention.size": "1048576", + "log.dir": "/tmp/kafka_server_3_logs", + "log_filename": "kafka_server_9093.log", + "config_filename": "kafka_server_9093.properties" + }, + { + "entity_id": "4", + "topic": "test_1", + "threads": "5", + "compression-codec": "1", + "message-size": "500", + "message": "500", + "request-num-acks": "-1", + "sync":"false", + "log_filename": "producer_performance_4.log", + "config_filename": "producer_performance_4.properties" + }, + { + "entity_id": "5", + "topic": "test_2", + "threads": "5", + "compression-codec": "1", + "message-size": "500", + "message": "500", + "request-num-acks": "-1", + "sync":"false", + "log_filename": "producer_performance_5.log", + "config_filename": "producer_performance_5.properties" + }, + { + "entity_id": "6", + "topic": "test_1", + "groupid": "mytestgroup", + "consumer-timeout-ms": "60000", + "log_filename": "console_consumer_6.log", + "config_filename": "console_consumer_6.properties" + }, + { + "entity_id": "7", + "topic": "test_2", + "groupid": "mytestgroup", + "consumer-timeout-ms": "60000", + "log_filename": "console_consumer_7.log", + "config_filename": "console_consumer_7.properties" + } + ] +} Index: system_test/log_retention_testsuite/testcase_4008/testcase_4008_properties.json =================================================================== --- system_test/log_retention_testsuite/testcase_4008/testcase_4008_properties.json (revision 0) +++ system_test/log_retention_testsuite/testcase_4008/testcase_4008_properties.json (revision 0) @@ -0,0 +1,102 @@ +{ + "description": {"01":"Broker Log Retention : 1. 
acks => 1", + "02":"Produce and consume messages to 2 topics - 2 partitions", + "03":"This test sends messages to 3 replicas", + "04":"To trigger leader election: find the leader and terminate by controlled failure (kill -15)", + "05":"Restart the terminated broker", + "06":"Lookup brokers' log4j messages and verify that leader is re-elected successfully", + "07":"At the end it verifies the log size and contents", + "08":"Use a consumer to verify no message loss.", + "09":"Producer dimensions : mode:async, acks:1, comp:1", + "10":"Log segment size : 102400" + }, + "testcase_args": { + "broker_type": "leader", + "bounce_broker": "true", + "replica_factor": "3", + "num_partition": "2", + "num_iteration": "1", + "sleep_seconds_between_producer_calls": "1", + "broker_down_time_in_sec": "5", + "message_producing_free_time_sec": "15" + }, + "entities": [ + { + "entity_id": "0", + "clientPort": "2188", + "dataDir": "/tmp/zookeeper_0", + "log_filename": "zookeeper_2188.log", + "config_filename": "zookeeper_2188.properties" + }, + { + "entity_id": "1", + "port": "9091", + "brokerid": "1", + "log.file.size": "102400", + "log.retention.size": "1048576", + "log.dir": "/tmp/kafka_server_1_logs", + "log_filename": "kafka_server_9091.log", + "config_filename": "kafka_server_9091.properties" + }, + { + "entity_id": "2", + "port": "9092", + "brokerid": "2", + "log.file.size": "102400", + "log.retention.size": "1048576", + "log.dir": "/tmp/kafka_server_2_logs", + "log_filename": "kafka_server_9092.log", + "config_filename": "kafka_server_9092.properties" + }, + { + "entity_id": "3", + "port": "9093", + "brokerid": "3", + "log.file.size": "102400", + "log.retention.size": "1048576", + "log.dir": "/tmp/kafka_server_3_logs", + "log_filename": "kafka_server_9093.log", + "config_filename": "kafka_server_9093.properties" + }, + { + "entity_id": "4", + "topic": "test_1", + "threads": "5", + "compression-codec": "1", + "message-size": "500", + "message": "500", + "request-num-acks": "1", + "sync":"false", + "log_filename": "producer_performance_4.log", + "config_filename": "producer_performance_4.properties" + }, + { + "entity_id": "5", + "topic": "test_2", + "threads": "5", + "compression-codec": "1", + "message-size": "500", + "message": "500", + "request-num-acks": "1", + "sync":"false", + "log_filename": "producer_performance_5.log", + "config_filename": "producer_performance_5.properties" + }, + { + "entity_id": "6", + "topic": "test_1", + "groupid": "mytestgroup", + "consumer-timeout-ms": "60000", + "log_filename": "console_consumer_6.log", + "config_filename": "console_consumer_6.properties" + }, + { + "entity_id": "7", + "topic": "test_2", + "groupid": "mytestgroup", + "consumer-timeout-ms": "60000", + "log_filename": "console_consumer_7.log", + "config_filename": "console_consumer_7.properties" + } + ] +} Index: system_test/log_retention_testsuite/testcase_4011/testcase_4011_properties.json =================================================================== --- system_test/log_retention_testsuite/testcase_4011/testcase_4011_properties.json (revision 0) +++ system_test/log_retention_testsuite/testcase_4011/testcase_4011_properties.json (revision 0) @@ -0,0 +1,102 @@ +{ + "description": {"01":"Broker Log Retention (Replica Factor < No. 
of Brokers) : Base Test", + "02":"Produce and consume messages to 2 topics - 2 partitions", + "03":"This test sends messages to 2 replicas", + "04":"To trigger leader election: find the leader and terminate by controlled failure (kill -15)", + "05":"Restart the terminated broker", + "06":"Lookup brokers' log4j messages and verify that leader is re-elected successfully", + "07":"At the end it verifies the log size and contents", + "08":"Use a consumer to verify no message loss.", + "09":"Producer dimensions : mode:sync, acks:-1, comp:0", + "10":"Log segment size : 102400" + }, + "testcase_args": { + "broker_type": "leader", + "bounce_broker": "true", + "replica_factor": "2", + "num_partition": "2", + "num_iteration": "1", + "sleep_seconds_between_producer_calls": "1", + "broker_down_time_in_sec": "5", + "message_producing_free_time_sec": "15" + }, + "entities": [ + { + "entity_id": "0", + "clientPort": "2188", + "dataDir": "/tmp/zookeeper_0", + "log_filename": "zookeeper_2188.log", + "config_filename": "zookeeper_2188.properties" + }, + { + "entity_id": "1", + "port": "9091", + "brokerid": "1", + "log.file.size": "102400", + "log.retention.size": "1048576", + "log.dir": "/tmp/kafka_server_1_logs", + "log_filename": "kafka_server_9091.log", + "config_filename": "kafka_server_9091.properties" + }, + { + "entity_id": "2", + "port": "9092", + "brokerid": "2", + "log.file.size": "102400", + "log.retention.size": "1048576", + "log.dir": "/tmp/kafka_server_2_logs", + "log_filename": "kafka_server_9092.log", + "config_filename": "kafka_server_9092.properties" + }, + { + "entity_id": "3", + "port": "9093", + "brokerid": "3", + "log.file.size": "102400", + "log.retention.size": "1048576", + "log.dir": "/tmp/kafka_server_3_logs", + "log_filename": "kafka_server_9093.log", + "config_filename": "kafka_server_9093.properties" + }, + { + "entity_id": "4", + "topic": "test_1", + "threads": "5", + "compression-codec": "0", + "message-size": "500", + "message": "500", + "request-num-acks": "-1", + "sync":"true", + "log_filename": "producer_performance_4.log", + "config_filename": "producer_performance_4.properties" + }, + { + "entity_id": "5", + "topic": "test_2", + "threads": "5", + "compression-codec": "0", + "message-size": "500", + "message": "500", + "request-num-acks": "-1", + "sync":"true", + "log_filename": "producer_performance_5.log", + "config_filename": "producer_performance_5.properties" + }, + { + "entity_id": "6", + "topic": "test_1", + "groupid": "mytestgroup", + "consumer-timeout-ms": "60000", + "log_filename": "console_consumer_6.log", + "config_filename": "console_consumer_6.properties" + }, + { + "entity_id": "7", + "topic": "test_2", + "groupid": "mytestgroup", + "consumer-timeout-ms": "60000", + "log_filename": "console_consumer_7.log", + "config_filename": "console_consumer_7.properties" + } + ] +} Index: system_test/log_retention_testsuite/testcase_4012/testcase_4012_properties.json =================================================================== --- system_test/log_retention_testsuite/testcase_4012/testcase_4012_properties.json (revision 0) +++ system_test/log_retention_testsuite/testcase_4012/testcase_4012_properties.json (revision 0) @@ -0,0 +1,102 @@ +{ + "description": {"01":"Broker Log Retention (Replica Factor < No. of Brokers) : 1. 
acks => 1", + "02":"Produce and consume messages to 2 topics - 2 partitions", + "03":"This test sends messages to 2 replicas", + "04":"To trigger leader election: find the leader and terminate by controlled failure (kill -15)", + "05":"Restart the terminated broker", + "06":"Lookup brokers' log4j messages and verify that leader is re-elected successfully", + "07":"At the end it verifies the log size and contents", + "08":"Use a consumer to verify no message loss.", + "09":"Producer dimensions : mode:sync, acks:1, comp:0", + "10":"Log segment size : 102400" + }, + "testcase_args": { + "broker_type": "leader", + "bounce_broker": "true", + "replica_factor": "2", + "num_partition": "2", + "num_iteration": "1", + "sleep_seconds_between_producer_calls": "1", + "broker_down_time_in_sec": "5", + "message_producing_free_time_sec": "15" + }, + "entities": [ + { + "entity_id": "0", + "clientPort": "2188", + "dataDir": "/tmp/zookeeper_0", + "log_filename": "zookeeper_2188.log", + "config_filename": "zookeeper_2188.properties" + }, + { + "entity_id": "1", + "port": "9091", + "brokerid": "1", + "log.file.size": "102400", + "log.retention.size": "1048576", + "log.dir": "/tmp/kafka_server_1_logs", + "log_filename": "kafka_server_9091.log", + "config_filename": "kafka_server_9091.properties" + }, + { + "entity_id": "2", + "port": "9092", + "brokerid": "2", + "log.file.size": "102400", + "log.retention.size": "1048576", + "log.dir": "/tmp/kafka_server_2_logs", + "log_filename": "kafka_server_9092.log", + "config_filename": "kafka_server_9092.properties" + }, + { + "entity_id": "3", + "port": "9093", + "brokerid": "3", + "log.file.size": "102400", + "log.retention.size": "1048576", + "log.dir": "/tmp/kafka_server_3_logs", + "log_filename": "kafka_server_9093.log", + "config_filename": "kafka_server_9093.properties" + }, + { + "entity_id": "4", + "topic": "test_1", + "threads": "5", + "compression-codec": "0", + "message-size": "500", + "message": "500", + "request-num-acks": "1", + "sync":"true", + "log_filename": "producer_performance_4.log", + "config_filename": "producer_performance_4.properties" + }, + { + "entity_id": "5", + "topic": "test_2", + "threads": "5", + "compression-codec": "0", + "message-size": "500", + "message": "500", + "request-num-acks": "1", + "sync":"true", + "log_filename": "producer_performance_5.log", + "config_filename": "producer_performance_5.properties" + }, + { + "entity_id": "6", + "topic": "test_1", + "groupid": "mytestgroup", + "consumer-timeout-ms": "60000", + "log_filename": "console_consumer_6.log", + "config_filename": "console_consumer_6.properties" + }, + { + "entity_id": "7", + "topic": "test_2", + "groupid": "mytestgroup", + "consumer-timeout-ms": "60000", + "log_filename": "console_consumer_7.log", + "config_filename": "console_consumer_7.properties" + } + ] +} Index: system_test/log_retention_testsuite/testcase_4013/testcase_4013_properties.json =================================================================== --- system_test/log_retention_testsuite/testcase_4013/testcase_4013_properties.json (revision 0) +++ system_test/log_retention_testsuite/testcase_4013/testcase_4013_properties.json (revision 0) @@ -0,0 +1,102 @@ +{ + "description": {"01":"Broker Log Retention (Replica Factor < No. of Brokers) : 1. 
acks => -1, comp => 1", + "02":"Produce and consume messages to 2 topics - 2 partitions", + "03":"This test sends messages to 2 replicas", + "04":"To trigger leader election: find the leader and terminate by controlled failure (kill -15)", + "05":"Restart the terminated broker", + "06":"Lookup brokers' log4j messages and verify that leader is re-elected successfully", + "07":"At the end it verifies the log size and contents", + "08":"Use a consumer to verify no message loss.", + "09":"Producer dimensions : mode:sync, acks:-1, comp:1", + "10":"Log segment size : 102400" + }, + "testcase_args": { + "broker_type": "leader", + "bounce_broker": "true", + "replica_factor": "2", + "num_partition": "2", + "num_iteration": "1", + "sleep_seconds_between_producer_calls": "1", + "broker_down_time_in_sec": "5", + "message_producing_free_time_sec": "15" + }, + "entities": [ + { + "entity_id": "0", + "clientPort": "2188", + "dataDir": "/tmp/zookeeper_0", + "log_filename": "zookeeper_2188.log", + "config_filename": "zookeeper_2188.properties" + }, + { + "entity_id": "1", + "port": "9091", + "brokerid": "1", + "log.file.size": "102400", + "log.retention.size": "1048576", + "log.dir": "/tmp/kafka_server_1_logs", + "log_filename": "kafka_server_9091.log", + "config_filename": "kafka_server_9091.properties" + }, + { + "entity_id": "2", + "port": "9092", + "brokerid": "2", + "log.file.size": "102400", + "log.retention.size": "1048576", + "log.dir": "/tmp/kafka_server_2_logs", + "log_filename": "kafka_server_9092.log", + "config_filename": "kafka_server_9092.properties" + }, + { + "entity_id": "3", + "port": "9093", + "brokerid": "3", + "log.file.size": "102400", + "log.retention.size": "1048576", + "log.dir": "/tmp/kafka_server_3_logs", + "log_filename": "kafka_server_9093.log", + "config_filename": "kafka_server_9093.properties" + }, + { + "entity_id": "4", + "topic": "test_1", + "threads": "5", + "compression-codec": "1", + "message-size": "500", + "message": "500", + "request-num-acks": "-1", + "sync":"true", + "log_filename": "producer_performance_4.log", + "config_filename": "producer_performance_4.properties" + }, + { + "entity_id": "5", + "topic": "test_2", + "threads": "5", + "compression-codec": "1", + "message-size": "500", + "message": "500", + "request-num-acks": "-1", + "sync":"true", + "log_filename": "producer_performance_5.log", + "config_filename": "producer_performance_5.properties" + }, + { + "entity_id": "6", + "topic": "test_1", + "groupid": "mytestgroup", + "consumer-timeout-ms": "60000", + "log_filename": "console_consumer_6.log", + "config_filename": "console_consumer_6.properties" + }, + { + "entity_id": "7", + "topic": "test_2", + "groupid": "mytestgroup", + "consumer-timeout-ms": "60000", + "log_filename": "console_consumer_7.log", + "config_filename": "console_consumer_7.properties" + } + ] +} Index: system_test/log_retention_testsuite/testcase_4014/testcase_4014_properties.json =================================================================== --- system_test/log_retention_testsuite/testcase_4014/testcase_4014_properties.json (revision 0) +++ system_test/log_retention_testsuite/testcase_4014/testcase_4014_properties.json (revision 0) @@ -0,0 +1,102 @@ +{ + "description": {"01":"Broker Log Retention (Replica Factor < No. of Brokers) : 1. 
acks => 1", + "02":"Produce and consume messages to 2 topics - 2 partitions", + "03":"This test sends messages to 2 replicas", + "04":"To trigger leader election: find the leader and terminate by controlled failure (kill -15)", + "05":"Restart the terminated broker", + "06":"Lookup brokers' log4j messages and verify that leader is re-elected successfully", + "07":"At the end it verifies the log size and contents", + "08":"Use a consumer to verify no message loss.", + "09":"Producer dimensions : mode:sync, acks:1, comp:1", + "10":"Log segment size : 102400" + }, + "testcase_args": { + "broker_type": "leader", + "bounce_broker": "true", + "replica_factor": "2", + "num_partition": "2", + "num_iteration": "1", + "sleep_seconds_between_producer_calls": "1", + "broker_down_time_in_sec": "5", + "message_producing_free_time_sec": "15" + }, + "entities": [ + { + "entity_id": "0", + "clientPort": "2188", + "dataDir": "/tmp/zookeeper_0", + "log_filename": "zookeeper_2188.log", + "config_filename": "zookeeper_2188.properties" + }, + { + "entity_id": "1", + "port": "9091", + "brokerid": "1", + "log.file.size": "102400", + "log.retention.size": "1048576", + "log.dir": "/tmp/kafka_server_1_logs", + "log_filename": "kafka_server_9091.log", + "config_filename": "kafka_server_9091.properties" + }, + { + "entity_id": "2", + "port": "9092", + "brokerid": "2", + "log.file.size": "102400", + "log.retention.size": "1048576", + "log.dir": "/tmp/kafka_server_2_logs", + "log_filename": "kafka_server_9092.log", + "config_filename": "kafka_server_9092.properties" + }, + { + "entity_id": "3", + "port": "9093", + "brokerid": "3", + "log.file.size": "102400", + "log.retention.size": "1048576", + "log.dir": "/tmp/kafka_server_3_logs", + "log_filename": "kafka_server_9093.log", + "config_filename": "kafka_server_9093.properties" + }, + { + "entity_id": "4", + "topic": "test_1", + "threads": "5", + "compression-codec": "1", + "message-size": "500", + "message": "500", + "request-num-acks": "1", + "sync":"true", + "log_filename": "producer_performance_4.log", + "config_filename": "producer_performance_4.properties" + }, + { + "entity_id": "5", + "topic": "test_2", + "threads": "5", + "compression-codec": "1", + "message-size": "500", + "message": "500", + "request-num-acks": "1", + "sync":"true", + "log_filename": "producer_performance_5.log", + "config_filename": "producer_performance_5.properties" + }, + { + "entity_id": "6", + "topic": "test_1", + "groupid": "mytestgroup", + "consumer-timeout-ms": "60000", + "log_filename": "console_consumer_6.log", + "config_filename": "console_consumer_6.properties" + }, + { + "entity_id": "7", + "topic": "test_2", + "groupid": "mytestgroup", + "consumer-timeout-ms": "60000", + "log_filename": "console_consumer_7.log", + "config_filename": "console_consumer_7.properties" + } + ] +} Index: system_test/log_retention_testsuite/testcase_4015/testcase_4015_properties.json =================================================================== --- system_test/log_retention_testsuite/testcase_4015/testcase_4015_properties.json (revision 0) +++ system_test/log_retention_testsuite/testcase_4015/testcase_4015_properties.json (revision 0) @@ -0,0 +1,102 @@ +{ + "description": {"01":"Broker Log Retention (Replica Factor < No. of Brokers) : 1. 
sync => false, acks => -1, comp => 0", + "02":"Produce and consume messages to 2 topics - 2 partitions", + "03":"This test sends messages to 2 replicas", + "04":"To trigger leader election: find the leader and terminate by controlled failure (kill -15)", + "05":"Restart the terminated broker", + "06":"Lookup brokers' log4j messages and verify that leader is re-elected successfully", + "07":"At the end it verifies the log size and contents", + "08":"Use a consumer to verify no message loss.", + "09":"Producer dimensions : mode:async, acks:-1, comp:0", + "10":"Log segment size : 102400" + }, + "testcase_args": { + "broker_type": "leader", + "bounce_broker": "true", + "replica_factor": "2", + "num_partition": "2", + "num_iteration": "1", + "sleep_seconds_between_producer_calls": "1", + "broker_down_time_in_sec": "5", + "message_producing_free_time_sec": "15" + }, + "entities": [ + { + "entity_id": "0", + "clientPort": "2188", + "dataDir": "/tmp/zookeeper_0", + "log_filename": "zookeeper_2188.log", + "config_filename": "zookeeper_2188.properties" + }, + { + "entity_id": "1", + "port": "9091", + "brokerid": "1", + "log.file.size": "102400", + "log.retention.size": "1048576", + "log.dir": "/tmp/kafka_server_1_logs", + "log_filename": "kafka_server_9091.log", + "config_filename": "kafka_server_9091.properties" + }, + { + "entity_id": "2", + "port": "9092", + "brokerid": "2", + "log.file.size": "102400", + "log.retention.size": "1048576", + "log.dir": "/tmp/kafka_server_2_logs", + "log_filename": "kafka_server_9092.log", + "config_filename": "kafka_server_9092.properties" + }, + { + "entity_id": "3", + "port": "9093", + "brokerid": "3", + "log.file.size": "102400", + "log.retention.size": "1048576", + "log.dir": "/tmp/kafka_server_3_logs", + "log_filename": "kafka_server_9093.log", + "config_filename": "kafka_server_9093.properties" + }, + { + "entity_id": "4", + "topic": "test_1", + "threads": "5", + "compression-codec": "0", + "message-size": "500", + "message": "500", + "request-num-acks": "-1", + "sync":"false", + "log_filename": "producer_performance_4.log", + "config_filename": "producer_performance_4.properties" + }, + { + "entity_id": "5", + "topic": "test_2", + "threads": "5", + "compression-codec": "0", + "message-size": "500", + "message": "500", + "request-num-acks": "-1", + "sync":"false", + "log_filename": "producer_performance_5.log", + "config_filename": "producer_performance_5.properties" + }, + { + "entity_id": "6", + "topic": "test_1", + "groupid": "mytestgroup", + "consumer-timeout-ms": "60000", + "log_filename": "console_consumer_6.log", + "config_filename": "console_consumer_6.properties" + }, + { + "entity_id": "7", + "topic": "test_2", + "groupid": "mytestgroup", + "consumer-timeout-ms": "60000", + "log_filename": "console_consumer_7.log", + "config_filename": "console_consumer_7.properties" + } + ] +} Index: system_test/log_retention_testsuite/testcase_4016/testcase_4016_properties.json =================================================================== --- system_test/log_retention_testsuite/testcase_4016/testcase_4016_properties.json (revision 0) +++ system_test/log_retention_testsuite/testcase_4016/testcase_4016_properties.json (revision 0) @@ -0,0 +1,102 @@ +{ + "description": {"01":"Broker Log Retention (Replica Factor < No. of Brokers) : 1. 
acks => 1", + "02":"Produce and consume messages to 2 topics - 2 partitions", + "03":"This test sends messages to 2 replicas", + "04":"To trigger leader election: find the leader and terminate by controlled failure (kill -15)", + "05":"Restart the terminated broker", + "06":"Lookup brokers' log4j messages and verify that leader is re-elected successfully", + "07":"At the end it verifies the log size and contents", + "08":"Use a consumer to verify no message loss.", + "09":"Producer dimensions : mode:async, acks:1, comp:0", + "10":"Log segment size : 102400" + }, + "testcase_args": { + "broker_type": "leader", + "bounce_broker": "true", + "replica_factor": "2", + "num_partition": "2", + "num_iteration": "1", + "sleep_seconds_between_producer_calls": "1", + "broker_down_time_in_sec": "5", + "message_producing_free_time_sec": "15" + }, + "entities": [ + { + "entity_id": "0", + "clientPort": "2188", + "dataDir": "/tmp/zookeeper_0", + "log_filename": "zookeeper_2188.log", + "config_filename": "zookeeper_2188.properties" + }, + { + "entity_id": "1", + "port": "9091", + "brokerid": "1", + "log.file.size": "102400", + "log.retention.size": "1048576", + "log.dir": "/tmp/kafka_server_1_logs", + "log_filename": "kafka_server_9091.log", + "config_filename": "kafka_server_9091.properties" + }, + { + "entity_id": "2", + "port": "9092", + "brokerid": "2", + "log.file.size": "102400", + "log.retention.size": "1048576", + "log.dir": "/tmp/kafka_server_2_logs", + "log_filename": "kafka_server_9092.log", + "config_filename": "kafka_server_9092.properties" + }, + { + "entity_id": "3", + "port": "9093", + "brokerid": "3", + "log.file.size": "102400", + "log.retention.size": "1048576", + "log.dir": "/tmp/kafka_server_3_logs", + "log_filename": "kafka_server_9093.log", + "config_filename": "kafka_server_9093.properties" + }, + { + "entity_id": "4", + "topic": "test_1", + "threads": "5", + "compression-codec": "0", + "message-size": "500", + "message": "500", + "request-num-acks": "1", + "sync":"false", + "log_filename": "producer_performance_4.log", + "config_filename": "producer_performance_4.properties" + }, + { + "entity_id": "5", + "topic": "test_2", + "threads": "5", + "compression-codec": "0", + "message-size": "500", + "message": "500", + "request-num-acks": "1", + "sync":"false", + "log_filename": "producer_performance_5.log", + "config_filename": "producer_performance_5.properties" + }, + { + "entity_id": "6", + "topic": "test_1", + "groupid": "mytestgroup", + "consumer-timeout-ms": "60000", + "log_filename": "console_consumer_6.log", + "config_filename": "console_consumer_6.properties" + }, + { + "entity_id": "7", + "topic": "test_2", + "groupid": "mytestgroup", + "consumer-timeout-ms": "60000", + "log_filename": "console_consumer_7.log", + "config_filename": "console_consumer_7.properties" + } + ] +} Index: system_test/log_retention_testsuite/testcase_4017/testcase_4017_properties.json =================================================================== --- system_test/log_retention_testsuite/testcase_4017/testcase_4017_properties.json (revision 0) +++ system_test/log_retention_testsuite/testcase_4017/testcase_4017_properties.json (revision 0) @@ -0,0 +1,102 @@ +{ + "description": {"01":"Broker Log Retention (Replica Factor < No. of Brokers) : 1. acks => -1, 2. 
comp => 1", + "02":"Produce and consume messages to 2 topics - 2 partitions", + "03":"This test sends messages to 2 replicas", + "04":"To trigger leader election: find the leader and terminate by controlled failure (kill -15)", + "05":"Restart the terminated broker", + "06":"Lookup brokers' log4j messages and verify that leader is re-elected successfully", + "07":"At the end it verifies the log size and contents", + "08":"Use a consumer to verify no message loss.", + "09":"Producer dimensions : mode:async, acks:-1, comp:1", + "10":"Log segment size : 102400" + }, + "testcase_args": { + "broker_type": "leader", + "bounce_broker": "true", + "replica_factor": "2", + "num_partition": "2", + "num_iteration": "1", + "sleep_seconds_between_producer_calls": "1", + "broker_down_time_in_sec": "5", + "message_producing_free_time_sec": "15" + }, + "entities": [ + { + "entity_id": "0", + "clientPort": "2188", + "dataDir": "/tmp/zookeeper_0", + "log_filename": "zookeeper_2188.log", + "config_filename": "zookeeper_2188.properties" + }, + { + "entity_id": "1", + "port": "9091", + "brokerid": "1", + "log.file.size": "102400", + "log.retention.size": "1048576", + "log.dir": "/tmp/kafka_server_1_logs", + "log_filename": "kafka_server_9091.log", + "config_filename": "kafka_server_9091.properties" + }, + { + "entity_id": "2", + "port": "9092", + "brokerid": "2", + "log.file.size": "102400", + "log.retention.size": "1048576", + "log.dir": "/tmp/kafka_server_2_logs", + "log_filename": "kafka_server_9092.log", + "config_filename": "kafka_server_9092.properties" + }, + { + "entity_id": "3", + "port": "9093", + "brokerid": "3", + "log.file.size": "102400", + "log.retention.size": "1048576", + "log.dir": "/tmp/kafka_server_3_logs", + "log_filename": "kafka_server_9093.log", + "config_filename": "kafka_server_9093.properties" + }, + { + "entity_id": "4", + "topic": "test_1", + "threads": "5", + "compression-codec": "1", + "message-size": "500", + "message": "500", + "request-num-acks": "-1", + "sync":"false", + "log_filename": "producer_performance_4.log", + "config_filename": "producer_performance_4.properties" + }, + { + "entity_id": "5", + "topic": "test_2", + "threads": "5", + "compression-codec": "1", + "message-size": "500", + "message": "500", + "request-num-acks": "-1", + "sync":"false", + "log_filename": "producer_performance_5.log", + "config_filename": "producer_performance_5.properties" + }, + { + "entity_id": "6", + "topic": "test_1", + "groupid": "mytestgroup", + "consumer-timeout-ms": "60000", + "log_filename": "console_consumer_6.log", + "config_filename": "console_consumer_6.properties" + }, + { + "entity_id": "7", + "topic": "test_2", + "groupid": "mytestgroup", + "consumer-timeout-ms": "60000", + "log_filename": "console_consumer_7.log", + "config_filename": "console_consumer_7.properties" + } + ] +} Index: system_test/log_retention_testsuite/testcase_4018/testcase_4018_properties.json =================================================================== --- system_test/log_retention_testsuite/testcase_4018/testcase_4018_properties.json (revision 0) +++ system_test/log_retention_testsuite/testcase_4018/testcase_4018_properties.json (revision 0) @@ -0,0 +1,102 @@ +{ + "description": {"01":"Broker Log Retention (Replica Factor < No. of Brokers) : 1. 
acks => 1", + "02":"Produce and consume messages to 2 topics - 2 partitions", + "03":"This test sends messages to 2 replicas", + "04":"To trigger leader election: find the leader and terminate by controlled failure (kill -15)", + "05":"Restart the terminated broker", + "06":"Lookup brokers' log4j messages and verify that leader is re-elected successfully", + "07":"At the end it verifies the log size and contents", + "08":"Use a consumer to verify no message loss.", + "09":"Producer dimensions : mode:async, acks:1, comp:1", + "10":"Log segment size : 102400" + }, + "testcase_args": { + "broker_type": "leader", + "bounce_broker": "true", + "replica_factor": "2", + "num_partition": "2", + "num_iteration": "1", + "sleep_seconds_between_producer_calls": "1", + "broker_down_time_in_sec": "5", + "message_producing_free_time_sec": "15" + }, + "entities": [ + { + "entity_id": "0", + "clientPort": "2188", + "dataDir": "/tmp/zookeeper_0", + "log_filename": "zookeeper_2188.log", + "config_filename": "zookeeper_2188.properties" + }, + { + "entity_id": "1", + "port": "9091", + "brokerid": "1", + "log.file.size": "102400", + "log.retention.size": "1048576", + "log.dir": "/tmp/kafka_server_1_logs", + "log_filename": "kafka_server_9091.log", + "config_filename": "kafka_server_9091.properties" + }, + { + "entity_id": "2", + "port": "9092", + "brokerid": "2", + "log.file.size": "102400", + "log.retention.size": "1048576", + "log.dir": "/tmp/kafka_server_2_logs", + "log_filename": "kafka_server_9092.log", + "config_filename": "kafka_server_9092.properties" + }, + { + "entity_id": "3", + "port": "9093", + "brokerid": "3", + "log.file.size": "102400", + "log.retention.size": "1048576", + "log.dir": "/tmp/kafka_server_3_logs", + "log_filename": "kafka_server_9093.log", + "config_filename": "kafka_server_9093.properties" + }, + { + "entity_id": "4", + "topic": "test_1", + "threads": "5", + "compression-codec": "1", + "message-size": "500", + "message": "500", + "request-num-acks": "1", + "sync":"false", + "log_filename": "producer_performance_4.log", + "config_filename": "producer_performance_4.properties" + }, + { + "entity_id": "5", + "topic": "test_2", + "threads": "5", + "compression-codec": "1", + "message-size": "500", + "message": "500", + "request-num-acks": "1", + "sync":"false", + "log_filename": "producer_performance_5.log", + "config_filename": "producer_performance_5.properties" + }, + { + "entity_id": "6", + "topic": "test_1", + "groupid": "mytestgroup", + "consumer-timeout-ms": "60000", + "log_filename": "console_consumer_6.log", + "config_filename": "console_consumer_6.properties" + }, + { + "entity_id": "7", + "topic": "test_2", + "groupid": "mytestgroup", + "consumer-timeout-ms": "60000", + "log_filename": "console_consumer_7.log", + "config_filename": "console_consumer_7.properties" + } + ] +} Index: system_test/migration_tool_testsuite/migration_tool_test.py =================================================================== --- system_test/migration_tool_testsuite/migration_tool_test.py (revision 1406271) +++ system_test/migration_tool_testsuite/migration_tool_test.py (working copy) @@ -188,51 +188,39 @@ # ============================================= i = 1 numIterations = int(self.testcaseEnv.testcaseArgumentsDict["num_iteration"]) + bouncedEntityDownTimeSec = 1 + try: + bouncedEntityDownTimeSec = int(self.testcaseEnv.testcaseArgumentsDict["bounced_entity_downtime_sec"]) + except: + pass + while i <= numIterations: self.log_message("Iteration " + str(i) + " of " + str(numIterations)) - 
self.log_message("looking up leader") - leaderDict = kafka_system_test_utils.get_leader_elected_log_line( - self.systemTestEnv, self.testcaseEnv, self.leaderAttributesDict) - - # ========================== - # leaderDict looks like this: - # ========================== - #{'entity_id': u'3', - # 'partition': '0', - # 'timestamp': 1345050255.8280001, - # 'hostname': u'localhost', - # 'topic': 'test_1', - # 'brokerid': '3'} - # ============================================= - # validate to see if leader election is successful + # Bounce Migration Tool # ============================================= - self.log_message("validating leader election") - result = kafka_system_test_utils.validate_leader_election_successful( - self.testcaseEnv, leaderDict, self.testcaseEnv.validationStatusDict) - - # ============================================= - # trigger leader re-election by stopping leader - # to get re-election latency - # ============================================= - bounceLeaderFlag = self.testcaseEnv.testcaseArgumentsDict["bounce_leader"] - self.log_message("bounce_leader flag : " + bounceLeaderFlag) - if (bounceLeaderFlag.lower() == "true"): - reelectionLatency = kafka_system_test_utils.get_reelection_latency( - self.systemTestEnv, self.testcaseEnv, leaderDict, self.leaderAttributesDict) - latencyKeyName = "Leader Election Latency - iter " + str(i) + " brokerid " + leaderDict["brokerid"] - self.testcaseEnv.validationStatusDict[latencyKeyName] = str("{0:.2f}".format(reelectionLatency * 1000)) + " ms" - - # ============================================= - # starting previously terminated broker - # ============================================= - if bounceLeaderFlag.lower() == "true": - self.log_message("starting the previously terminated broker") - stoppedLeaderEntityId = leaderDict["entity_id"] - kafka_system_test_utils.start_entity_in_background(self.systemTestEnv, self.testcaseEnv, stoppedLeaderEntityId) + bounceMigrationTool = self.testcaseEnv.testcaseArgumentsDict["bounce_migration_tool"] + self.log_message("bounce_migration_tool flag : " + bounceMigrationTool) + if (bounceMigrationTool.lower() == "true"): + clusterConfigList = self.systemTestEnv.clusterEntityConfigDictList + migrationToolEntityIdList = system_test_utils.get_data_from_list_of_dicts( + clusterConfigList, "role", "migration_tool", "entity_id") + + stoppedMigrationToolEntityId = migrationToolEntityIdList[0] + migrationToolPPid = self.testcaseEnv.entityMigrationToolParentPidDict[stoppedMigrationToolEntityId] + + self.log_message("stopping migration tool : " + migrationToolPPid) + kafka_system_test_utils.stop_remote_entity(self.systemTestEnv, stoppedMigrationToolEntityId, migrationToolPPid) + self.anonLogger.info("sleeping for " + str(bouncedEntityDownTimeSec) + " sec") + time.sleep(bouncedEntityDownTimeSec) + + # starting previously terminated broker + self.log_message("starting the previously terminated migration tool") + kafka_system_test_utils.start_migration_tool(self.systemTestEnv, self.testcaseEnv, stoppedMigrationToolEntityId) + self.anonLogger.info("sleeping for 15s") time.sleep(15) i += 1 Index: system_test/migration_tool_testsuite/testcase_9001/testcase_9001_properties.json =================================================================== --- system_test/migration_tool_testsuite/testcase_9001/testcase_9001_properties.json (revision 1406271) +++ system_test/migration_tool_testsuite/testcase_9001/testcase_9001_properties.json (working copy) @@ -9,7 +9,7 @@ "08":"Log segment size : 51200" }, "testcase_args": { - 
"bounce_leader": "false", + "bounce_migration_tool": "false", "replica_factor": "3", "num_partition": "1", "num_iteration": "1", Index: system_test/migration_tool_testsuite/testcase_9003/cluster_config.json =================================================================== --- system_test/migration_tool_testsuite/testcase_9003/cluster_config.json (revision 0) +++ system_test/migration_tool_testsuite/testcase_9003/cluster_config.json (revision 0) @@ -0,0 +1,112 @@ +{ + "cluster_config": [ + { + "entity_id": "0", + "hostname": "localhost", + "role": "zookeeper", + "cluster_name":"source", + "kafka_home": "system_test/migration_tool_testsuite/0.7", + "java_home": "default", + "jmx_port": "9990" + }, + { + "entity_id": "1", + "hostname": "localhost", + "role": "broker", + "cluster_name":"source", + "kafka_home": "system_test/migration_tool_testsuite/0.7", + "java_home": "default", + "jmx_port": "9991" + }, + { + "entity_id": "2", + "hostname": "localhost", + "role": "broker", + "cluster_name":"source", + "kafka_home": "system_test/migration_tool_testsuite/0.7", + "java_home": "default", + "jmx_port": "9992" + }, + { + "entity_id": "3", + "hostname": "localhost", + "role": "broker", + "cluster_name":"source", + "kafka_home": "system_test/migration_tool_testsuite/0.7", + "java_home": "default", + "jmx_port": "9993" + }, + { + "entity_id": "4", + "hostname": "localhost", + "role": "broker", + "cluster_name":"target", + "kafka_home": "default", + "java_home": "default", + "jmx_port": "9994" + }, + { + "entity_id": "5", + "hostname": "localhost", + "role": "broker", + "cluster_name":"target", + "kafka_home": "default", + "java_home": "default", + "jmx_port": "9995" + }, + { + "entity_id": "6", + "hostname": "localhost", + "role": "broker", + "cluster_name":"target", + "kafka_home": "default", + "java_home": "default", + "jmx_port": "9996" + }, + { + "entity_id": "7", + "hostname": "localhost", + "role": "producer_performance", + "cluster_name":"source", + "kafka_home": "system_test/migration_tool_testsuite/0.7", + "java_home": "/export/apps/jdk/JDK-1_6_0_27", + "jmx_port": "9997" + }, + { + "entity_id": "8", + "hostname": "localhost", + "role": "console_consumer", + "cluster_name":"target", + "kafka_home": "default", + "java_home": "default", + "jmx_port": "9998" + }, + { + "entity_id": "9", + "hostname": "localhost", + "role": "zookeeper", + "cluster_name":"target", + "kafka_home": "default", + "java_home": "default", + "jmx_port": "9999" + }, + { + "entity_id": "10", + "hostname": "localhost", + "role": "migration_tool", + "cluster_name":"target", + "kafka_home": "default", + "java_home": "default", + "jmx_port": "9890" + }, + { + "entity_id": "11", + "hostname": "localhost", + "role": "migration_tool", + "cluster_name":"target", + "kafka_home": "default", + "java_home": "default", + "jmx_port": "9891" + } + ] +} Index: system_test/migration_tool_testsuite/testcase_9003/testcase_9003_properties.json =================================================================== --- system_test/migration_tool_testsuite/testcase_9003/testcase_9003_properties.json (revision 0) +++ system_test/migration_tool_testsuite/testcase_9003/testcase_9003_properties.json (revision 0) @@ -0,0 +1,136 @@ +{ + "description": {"01":"To Test : 'Replication with Migration Tool'", + "02":"Set up 2 clusters such as : SOURCE => Migration Tool => TARGET", + "03":"Produce and consume messages to a single topic - single partition.", + "04":"This test sends messages to 3 replicas", + "05":"At the end it verifies the log size and 
contents", + "06":"Use a consumer to verify no message loss in TARGET cluster.", + "07":"Producer dimensions : mode:async, acks:-1, comp:1", + "08":"Log segment size : 51200" + }, + "testcase_args": { + "bounce_migration_tool": "true", + "bounced_entity_downtime_sec": "30", + "replica_factor": "3", + "num_partition": "1", + "num_iteration": "1", + "sleep_seconds_between_producer_calls": "1", + "message_producing_free_time_sec": "30", + "num_messages_to_produce_per_producer_call": "50" + }, + "entities": [ + { + "entity_id": "0", + "clientPort": "2188", + "dataDir": "/tmp/zookeeper_0", + "log_filename": "zookeeper_0.log", + "config_filename": "zookeeper_0.properties" + }, + { + "entity_id": "1", + "port": "9091", + "brokerid": "1", + "log.file.size": "51200", + "log.dir": "/tmp/kafka_server_1_logs", + "log_filename": "kafka_server_1.log", + "config_filename": "kafka_server_1.properties" + }, + { + "entity_id": "2", + "port": "9092", + "brokerid": "2", + "log.file.size": "51200", + "log.dir": "/tmp/kafka_server_2_logs", + "log_filename": "kafka_server_2.log", + "config_filename": "kafka_server_2.properties" + }, + { + "entity_id": "3", + "port": "9093", + "brokerid": "3", + "log.file.size": "51200", + "log.dir": "/tmp/kafka_server_3_logs", + "log_filename": "kafka_server_3.log", + "config_filename": "kafka_server_3.properties" + }, + { + "entity_id": "4", + "port": "9094", + "brokerid": "4", + "log.file.size": "51200", + "log.dir": "/tmp/kafka_server_4_logs", + "log_filename": "kafka_server_4.log", + "config_filename": "kafka_server_4.properties" + }, + { + "entity_id": "5", + "port": "9095", + "brokerid": "5", + "log.file.size": "51200", + "log.dir": "/tmp/kafka_server_5_logs", + "log_filename": "kafka_server_5.log", + "config_filename": "kafka_server_5.properties" + }, + { + "entity_id": "6", + "port": "9096", + "brokerid": "6", + "log.file.size": "51200", + "log.dir": "/tmp/kafka_server_6_logs", + "log_filename": "kafka_server_6.log", + "config_filename": "kafka_server_6.properties" + }, + { + "entity_id": "7", + "topic": "test_1", + "threads": "5", + "compression-codec": "1", + "message-size": "500", + "message": "500", + "request-num-acks": "-1", + "async": "true", + "log_filename": "producer_performance_7.log", + "config_filename": "producer_performance_7.properties" + }, + { + "entity_id": "8", + "topic": "test_1", + "groupid": "mytestgroup", + "consumer-timeout-ms": "10000", + "log_filename": "console_consumer_8.log", + "formatter": "kafka.consumer.ChecksumMessageFormatter", + "config_filename": "console_consumer_8.properties" + }, + { + "entity_id": "9", + "clientPort": "2191", + "dataDir": "/tmp/zookeeper_9", + "log_filename": "zookeeper_9.log", + "config_filename": "zookeeper_9.properties" + }, + { + "entity_id": "10", + "whitelist": ".*", + "num.producers": "2", + "num.streams": "2", + "producer.config": "migration_tool_testsuite/config/migration_producer.properties", + "consumer.config": "migration_tool_testsuite/config/migration_consumer.properties", + "zkclient.01.jar": "migration_tool_testsuite/0.7/lib/zkclient-0.1.jar", + "kafka.07.jar" : "migration_tool_testsuite/0.7/lib/kafka-0.7.0.jar", + "log_filename": "migration_tool_10.log", + "config_filename": "migration_tool_10.properties" + }, + { + "entity_id": "11", + "whitelist": ".*", + "num.producers": "2", + "num.streams": "2", + "producer.config": "migration_tool_testsuite/config/migration_producer.properties", + "consumer.config": "migration_tool_testsuite/config/migration_consumer.properties", + "zkclient.01.jar": 
"migration_tool_testsuite/0.7/lib/zkclient-0.1.jar", + "kafka.07.jar" : "migration_tool_testsuite/0.7/lib/kafka-0.7.0.jar", + "log_filename": "migration_tool_11.log", + "config_filename": "migration_tool_11.properties" + } + ] +} Index: system_test/migration_tool_testsuite/testcase_9004/cluster_config.json =================================================================== --- system_test/migration_tool_testsuite/testcase_9004/cluster_config.json (revision 0) +++ system_test/migration_tool_testsuite/testcase_9004/cluster_config.json (revision 0) @@ -0,0 +1,112 @@ +{ + "cluster_config": [ + { + "entity_id": "0", + "hostname": "localhost", + "role": "zookeeper", + "cluster_name":"source", + "kafka_home": "system_test/migration_tool_testsuite/0.7", + "java_home": "default", + "jmx_port": "9990" + }, + { + "entity_id": "1", + "hostname": "localhost", + "role": "broker", + "cluster_name":"source", + "kafka_home": "system_test/migration_tool_testsuite/0.7", + "java_home": "default", + "jmx_port": "9991" + }, + { + "entity_id": "2", + "hostname": "localhost", + "role": "broker", + "cluster_name":"source", + "kafka_home": "system_test/migration_tool_testsuite/0.7", + "java_home": "default", + "jmx_port": "9992" + }, + { + "entity_id": "3", + "hostname": "localhost", + "role": "broker", + "cluster_name":"source", + "kafka_home": "system_test/migration_tool_testsuite/0.7", + "java_home": "default", + "jmx_port": "9993" + }, + { + "entity_id": "4", + "hostname": "localhost", + "role": "broker", + "cluster_name":"target", + "kafka_home": "default", + "java_home": "default", + "jmx_port": "9994" + }, + { + "entity_id": "5", + "hostname": "localhost", + "role": "broker", + "cluster_name":"target", + "kafka_home": "default", + "java_home": "default", + "jmx_port": "9995" + }, + { + "entity_id": "6", + "hostname": "localhost", + "role": "broker", + "cluster_name":"target", + "kafka_home": "default", + "java_home": "default", + "jmx_port": "9996" + }, + { + "entity_id": "7", + "hostname": "localhost", + "role": "producer_performance", + "cluster_name":"source", + "kafka_home": "system_test/migration_tool_testsuite/0.7", + "java_home": "/export/apps/jdk/JDK-1_6_0_27", + "jmx_port": "9997" + }, + { + "entity_id": "8", + "hostname": "localhost", + "role": "console_consumer", + "cluster_name":"target", + "kafka_home": "default", + "java_home": "default", + "jmx_port": "9998" + }, + { + "entity_id": "9", + "hostname": "localhost", + "role": "zookeeper", + "cluster_name":"target", + "kafka_home": "default", + "java_home": "default", + "jmx_port": "9999" + }, + { + "entity_id": "10", + "hostname": "localhost", + "role": "migration_tool", + "cluster_name":"target", + "kafka_home": "default", + "java_home": "default", + "jmx_port": "9890" + }, + { + "entity_id": "11", + "hostname": "localhost", + "role": "migration_tool", + "cluster_name":"target", + "kafka_home": "default", + "java_home": "default", + "jmx_port": "9891" + } + ] +} Index: system_test/migration_tool_testsuite/testcase_9004/testcase_9004_properties.json =================================================================== --- system_test/migration_tool_testsuite/testcase_9004/testcase_9004_properties.json (revision 0) +++ system_test/migration_tool_testsuite/testcase_9004/testcase_9004_properties.json (revision 0) @@ -0,0 +1,136 @@ +{ + "description": {"01":"To Test : 'Replication with Migration Tool'", + "02":"Set up 2 clusters such as : SOURCE => Migration Tool => TARGET", + "03":"Produce and consume messages to a single topic - single partition.", + 
"04":"This test sends messages to 3 replicas", + "05":"At the end it verifies the log size and contents", + "06":"Use a consumer to verify no message loss in TARGET cluster.", + "07":"Producer dimensions : mode:async, acks:1, comp:1", + "08":"Log segment size : 51200" + }, + "testcase_args": { + "bounce_migration_tool": "true", + "bounced_entity_downtime_sec": "30", + "replica_factor": "3", + "num_partition": "1", + "num_iteration": "1", + "sleep_seconds_between_producer_calls": "1", + "message_producing_free_time_sec": "30", + "num_messages_to_produce_per_producer_call": "50" + }, + "entities": [ + { + "entity_id": "0", + "clientPort": "2188", + "dataDir": "/tmp/zookeeper_0", + "log_filename": "zookeeper_0.log", + "config_filename": "zookeeper_0.properties" + }, + { + "entity_id": "1", + "port": "9091", + "brokerid": "1", + "log.file.size": "51200", + "log.dir": "/tmp/kafka_server_1_logs", + "log_filename": "kafka_server_1.log", + "config_filename": "kafka_server_1.properties" + }, + { + "entity_id": "2", + "port": "9092", + "brokerid": "2", + "log.file.size": "51200", + "log.dir": "/tmp/kafka_server_2_logs", + "log_filename": "kafka_server_2.log", + "config_filename": "kafka_server_2.properties" + }, + { + "entity_id": "3", + "port": "9093", + "brokerid": "3", + "log.file.size": "51200", + "log.dir": "/tmp/kafka_server_3_logs", + "log_filename": "kafka_server_3.log", + "config_filename": "kafka_server_3.properties" + }, + { + "entity_id": "4", + "port": "9094", + "brokerid": "4", + "log.file.size": "51200", + "log.dir": "/tmp/kafka_server_4_logs", + "log_filename": "kafka_server_4.log", + "config_filename": "kafka_server_4.properties" + }, + { + "entity_id": "5", + "port": "9095", + "brokerid": "5", + "log.file.size": "51200", + "log.dir": "/tmp/kafka_server_5_logs", + "log_filename": "kafka_server_5.log", + "config_filename": "kafka_server_5.properties" + }, + { + "entity_id": "6", + "port": "9096", + "brokerid": "6", + "log.file.size": "51200", + "log.dir": "/tmp/kafka_server_6_logs", + "log_filename": "kafka_server_6.log", + "config_filename": "kafka_server_6.properties" + }, + { + "entity_id": "7", + "topic": "test_1", + "threads": "5", + "compression-codec": "1", + "message-size": "500", + "message": "500", + "request-num-acks": "1", + "async": "true", + "log_filename": "producer_performance_7.log", + "config_filename": "producer_performance_7.properties" + }, + { + "entity_id": "8", + "topic": "test_1", + "groupid": "mytestgroup", + "consumer-timeout-ms": "10000", + "log_filename": "console_consumer_8.log", + "formatter": "kafka.consumer.ChecksumMessageFormatter", + "config_filename": "console_consumer_8.properties" + }, + { + "entity_id": "9", + "clientPort": "2191", + "dataDir": "/tmp/zookeeper_9", + "log_filename": "zookeeper_9.log", + "config_filename": "zookeeper_9.properties" + }, + { + "entity_id": "10", + "whitelist": ".*", + "num.producers": "2", + "num.streams": "2", + "producer.config": "migration_tool_testsuite/config/migration_producer.properties", + "consumer.config": "migration_tool_testsuite/config/migration_consumer.properties", + "zkclient.01.jar": "migration_tool_testsuite/0.7/lib/zkclient-0.1.jar", + "kafka.07.jar" : "migration_tool_testsuite/0.7/lib/kafka-0.7.0.jar", + "log_filename": "migration_tool_10.log", + "config_filename": "migration_tool_10.properties" + }, + { + "entity_id": "11", + "whitelist": ".*", + "num.producers": "2", + "num.streams": "2", + "producer.config": "migration_tool_testsuite/config/migration_producer.properties", + "consumer.config": 
"migration_tool_testsuite/config/migration_consumer.properties", + "zkclient.01.jar": "migration_tool_testsuite/0.7/lib/zkclient-0.1.jar", + "kafka.07.jar" : "migration_tool_testsuite/0.7/lib/kafka-0.7.0.jar", + "log_filename": "migration_tool_11.log", + "config_filename": "migration_tool_11.properties" + } + ] +} Index: system_test/migration_tool_testsuite/testcase_9005/cluster_config.json =================================================================== --- system_test/migration_tool_testsuite/testcase_9005/cluster_config.json (revision 0) +++ system_test/migration_tool_testsuite/testcase_9005/cluster_config.json (revision 0) @@ -0,0 +1,141 @@ +{ + "cluster_config": [ + { + "entity_id": "0", + "hostname": "localhost", + "role": "zookeeper", + "cluster_name":"source", + "kafka_home": "system_test/migration_tool_testsuite/0.7", + "java_home": "default", + "jmx_port": "9900" + }, + { + "entity_id": "1", + "hostname": "localhost", + "role": "zookeeper", + "cluster_name":"target", + "kafka_home": "default", + "java_home": "default", + "jmx_port": "9901" + }, + + + { + "entity_id": "2", + "hostname": "localhost", + "role": "broker", + "cluster_name":"source", + "kafka_home": "system_test/migration_tool_testsuite/0.7", + "java_home": "default", + "jmx_port": "9902" + }, + { + "entity_id": "3", + "hostname": "localhost", + "role": "broker", + "cluster_name":"source", + "kafka_home": "system_test/migration_tool_testsuite/0.7", + "java_home": "default", + "jmx_port": "9903" + }, + { + "entity_id": "4", + "hostname": "localhost", + "role": "broker", + "cluster_name":"source", + "kafka_home": "system_test/migration_tool_testsuite/0.7", + "java_home": "default", + "jmx_port": "9904" + }, + + + { + "entity_id": "5", + "hostname": "localhost", + "role": "broker", + "cluster_name":"target", + "kafka_home": "default", + "java_home": "default", + "jmx_port": "9905" + }, + { + "entity_id": "6", + "hostname": "localhost", + "role": "broker", + "cluster_name":"target", + "kafka_home": "default", + "java_home": "default", + "jmx_port": "9906" + }, + { + "entity_id": "7", + "hostname": "localhost", + "role": "broker", + "cluster_name":"target", + "kafka_home": "default", + "java_home": "default", + "jmx_port": "9907" + }, + + + { + "entity_id": "8", + "hostname": "localhost", + "role": "producer_performance", + "cluster_name":"source", + "kafka_home": "system_test/migration_tool_testsuite/0.7", + "java_home": "/export/apps/jdk/JDK-1_6_0_27", + "jmx_port": "9908" + }, + { + "entity_id": "9", + "hostname": "localhost", + "role": "producer_performance", + "cluster_name":"source", + "kafka_home": "system_test/migration_tool_testsuite/0.7", + "java_home": "/export/apps/jdk/JDK-1_6_0_27", + "jmx_port": "9909" + }, + + + { + "entity_id": "10", + "hostname": "localhost", + "role": "console_consumer", + "cluster_name":"target", + "kafka_home": "default", + "java_home": "default", + "jmx_port": "9910" + }, + { + "entity_id": "11", + "hostname": "localhost", + "role": "console_consumer", + "cluster_name":"target", + "kafka_home": "default", + "java_home": "default", + "jmx_port": "9911" + }, + + + { + "entity_id": "12", + "hostname": "localhost", + "role": "migration_tool", + "cluster_name":"target", + "kafka_home": "default", + "java_home": "default", + "jmx_port": "9912" + }, + { + "entity_id": "13", + "hostname": "localhost", + "role": "migration_tool", + "cluster_name":"target", + "kafka_home": "default", + "java_home": "default", + "jmx_port": "9913" + } + + ] +} Index: 
system_test/migration_tool_testsuite/testcase_9005/testcase_9005_properties.json =================================================================== --- system_test/migration_tool_testsuite/testcase_9005/testcase_9005_properties.json (revision 0) +++ system_test/migration_tool_testsuite/testcase_9005/testcase_9005_properties.json (revision 0) @@ -0,0 +1,167 @@ +{ + "description": {"01":"To Test : 'Replication with Migration Tool'", + "02":"Set up 2 clusters such as : SOURCE => Migration Tool => TARGET", + "03":"Produce and consume messages to 2 topics - 2 partitions.", + "04":"This test sends messages to 3 replicas", + "05":"At the end it verifies the log size and contents", + "06":"Use a consumer to verify no message loss in TARGET cluster.", + "07":"Producer dimensions : mode:async, acks:-1, comp:1", + "08":"Log segment size : 51200" + }, + "testcase_args": { + "bounce_migration_tool": "true", + "bounced_entity_downtime_sec": "30", + "replica_factor": "3", + "num_partition": "2", + "num_iteration": "1", + "sleep_seconds_between_producer_calls": "1", + "message_producing_free_time_sec": "30", + "num_messages_to_produce_per_producer_call": "50" + }, + "entities": [ + { + "entity_id": "0", + "clientPort": "2188", + "dataDir": "/tmp/zookeeper_0", + "log_filename": "zookeeper_0.log", + "config_filename": "zookeeper_0.properties" + }, + { + "entity_id": "1", + "clientPort": "2191", + "dataDir": "/tmp/zookeeper_1", + "log_filename": "zookeeper_1.log", + "config_filename": "zookeeper_1.properties" + }, + + + { + "entity_id": "2", + "port": "9091", + "brokerid": "1", + "log.file.size": "51200", + "log.dir": "/tmp/kafka_server_2_logs", + "log_filename": "kafka_server_2.log", + "config_filename": "kafka_server_2.properties" + }, + { + "entity_id": "3", + "port": "9092", + "brokerid": "2", + "log.file.size": "51200", + "log.dir": "/tmp/kafka_server_3_logs", + "log_filename": "kafka_server_3.log", + "config_filename": "kafka_server_3.properties" + }, + { + "entity_id": "4", + "port": "9093", + "brokerid": "3", + "log.file.size": "51200", + "log.dir": "/tmp/kafka_server_4_logs", + "log_filename": "kafka_server_4.log", + "config_filename": "kafka_server_4.properties" + }, + + + { + "entity_id": "5", + "port": "9094", + "brokerid": "4", + "log.file.size": "51200", + "log.dir": "/tmp/kafka_server_5_logs", + "log_filename": "kafka_server_5.log", + "config_filename": "kafka_server_5.properties" + }, + { + "entity_id": "6", + "port": "9095", + "brokerid": "5", + "log.file.size": "51200", + "log.dir": "/tmp/kafka_server_6_logs", + "log_filename": "kafka_server_6.log", + "config_filename": "kafka_server_6.properties" + }, + { + "entity_id": "7", + "port": "9096", + "brokerid": "6", + "log.file.size": "51200", + "log.dir": "/tmp/kafka_server_7_logs", + "log_filename": "kafka_server_7.log", + "config_filename": "kafka_server_7.properties" + }, + + + { + "entity_id": "8", + "topic": "test_1", + "threads": "5", + "compression-codec": "1", + "message-size": "500", + "message": "500", + "request-num-acks": "-1", + "async": "true", + "log_filename": "producer_performance_8.log", + "config_filename": "producer_performance_8.properties" + }, + { + "entity_id": "9", + "topic": "test_2", + "threads": "5", + "compression-codec": "1", + "message-size": "500", + "message": "500", + "request-num-acks": "-1", + "async": "true", + "log_filename": "producer_performance_9.log", + "config_filename": "producer_performance_9.properties" + }, + + + { + "entity_id": "10", + "topic": "test_1", + "groupid": "mytestgroup", + 
"consumer-timeout-ms": "10000", + "log_filename": "console_consumer_10.log", + "formatter": "kafka.consumer.ChecksumMessageFormatter", + "config_filename": "console_consumer_10.properties" + }, + { + "entity_id": "11", + "topic": "test_2", + "groupid": "mytestgroup", + "consumer-timeout-ms": "10000", + "log_filename": "console_consumer_11.log", + "formatter": "kafka.consumer.ChecksumMessageFormatter", + "config_filename": "console_consumer_11.properties" + }, + + + { + "entity_id": "12", + "whitelist": ".*", + "num.producers": "2", + "num.streams": "2", + "producer.config": "migration_tool_testsuite/config/migration_producer.properties", + "consumer.config": "migration_tool_testsuite/config/migration_consumer.properties", + "zkclient.01.jar": "migration_tool_testsuite/0.7/lib/zkclient-0.1.jar", + "kafka.07.jar" : "migration_tool_testsuite/0.7/lib/kafka-0.7.0.jar", + "log_filename": "migration_tool_12.log", + "config_filename": "migration_tool_12.properties" + }, + { + "entity_id": "13", + "whitelist": ".*", + "num.producers": "2", + "num.streams": "2", + "producer.config": "migration_tool_testsuite/config/migration_producer.properties", + "consumer.config": "migration_tool_testsuite/config/migration_consumer.properties", + "zkclient.01.jar": "migration_tool_testsuite/0.7/lib/zkclient-0.1.jar", + "kafka.07.jar" : "migration_tool_testsuite/0.7/lib/kafka-0.7.0.jar", + "log_filename": "migration_tool_13.log", + "config_filename": "migration_tool_13.properties" + } + ] +} Index: system_test/migration_tool_testsuite/testcase_9006/cluster_config.json =================================================================== --- system_test/migration_tool_testsuite/testcase_9006/cluster_config.json (revision 0) +++ system_test/migration_tool_testsuite/testcase_9006/cluster_config.json (revision 0) @@ -0,0 +1,141 @@ +{ + "cluster_config": [ + { + "entity_id": "0", + "hostname": "localhost", + "role": "zookeeper", + "cluster_name":"source", + "kafka_home": "system_test/migration_tool_testsuite/0.7", + "java_home": "default", + "jmx_port": "9900" + }, + { + "entity_id": "1", + "hostname": "localhost", + "role": "zookeeper", + "cluster_name":"target", + "kafka_home": "default", + "java_home": "default", + "jmx_port": "9901" + }, + + + { + "entity_id": "2", + "hostname": "localhost", + "role": "broker", + "cluster_name":"source", + "kafka_home": "system_test/migration_tool_testsuite/0.7", + "java_home": "default", + "jmx_port": "9902" + }, + { + "entity_id": "3", + "hostname": "localhost", + "role": "broker", + "cluster_name":"source", + "kafka_home": "system_test/migration_tool_testsuite/0.7", + "java_home": "default", + "jmx_port": "9903" + }, + { + "entity_id": "4", + "hostname": "localhost", + "role": "broker", + "cluster_name":"source", + "kafka_home": "system_test/migration_tool_testsuite/0.7", + "java_home": "default", + "jmx_port": "9904" + }, + + + { + "entity_id": "5", + "hostname": "localhost", + "role": "broker", + "cluster_name":"target", + "kafka_home": "default", + "java_home": "default", + "jmx_port": "9905" + }, + { + "entity_id": "6", + "hostname": "localhost", + "role": "broker", + "cluster_name":"target", + "kafka_home": "default", + "java_home": "default", + "jmx_port": "9906" + }, + { + "entity_id": "7", + "hostname": "localhost", + "role": "broker", + "cluster_name":"target", + "kafka_home": "default", + "java_home": "default", + "jmx_port": "9907" + }, + + + { + "entity_id": "8", + "hostname": "localhost", + "role": "producer_performance", + "cluster_name":"source", + "kafka_home": 
"system_test/migration_tool_testsuite/0.7", + "java_home": "/export/apps/jdk/JDK-1_6_0_27", + "jmx_port": "9908" + }, + { + "entity_id": "9", + "hostname": "localhost", + "role": "producer_performance", + "cluster_name":"source", + "kafka_home": "system_test/migration_tool_testsuite/0.7", + "java_home": "/export/apps/jdk/JDK-1_6_0_27", + "jmx_port": "9909" + }, + + + { + "entity_id": "10", + "hostname": "localhost", + "role": "console_consumer", + "cluster_name":"target", + "kafka_home": "default", + "java_home": "default", + "jmx_port": "9910" + }, + { + "entity_id": "11", + "hostname": "localhost", + "role": "console_consumer", + "cluster_name":"target", + "kafka_home": "default", + "java_home": "default", + "jmx_port": "9911" + }, + + + { + "entity_id": "12", + "hostname": "localhost", + "role": "migration_tool", + "cluster_name":"target", + "kafka_home": "default", + "java_home": "default", + "jmx_port": "9912" + }, + { + "entity_id": "13", + "hostname": "localhost", + "role": "migration_tool", + "cluster_name":"target", + "kafka_home": "default", + "java_home": "default", + "jmx_port": "9913" + } + + ] +} Index: system_test/migration_tool_testsuite/testcase_9006/testcase_9006_properties.json =================================================================== --- system_test/migration_tool_testsuite/testcase_9006/testcase_9006_properties.json (revision 0) +++ system_test/migration_tool_testsuite/testcase_9006/testcase_9006_properties.json (revision 0) @@ -0,0 +1,167 @@ +{ + "description": {"01":"To Test : 'Replication with Migration Tool'", + "02":"Set up 2 clusters such as : SOURCE => Migration Tool => TARGET", + "03":"Produce and consume messages to 2 topics - 2 partitions.", + "04":"This test sends messages to 3 replicas", + "05":"At the end it verifies the log size and contents", + "06":"Use a consumer to verify no message loss in TARGET cluster.", + "07":"Producer dimensions : mode:async, acks:1, comp:1", + "08":"Log segment size : 51200" + }, + "testcase_args": { + "bounce_migration_tool": "true", + "bounced_entity_downtime_sec": "30", + "replica_factor": "3", + "num_partition": "2", + "num_iteration": "1", + "sleep_seconds_between_producer_calls": "1", + "message_producing_free_time_sec": "30", + "num_messages_to_produce_per_producer_call": "50" + }, + "entities": [ + { + "entity_id": "0", + "clientPort": "2188", + "dataDir": "/tmp/zookeeper_0", + "log_filename": "zookeeper_0.log", + "config_filename": "zookeeper_0.properties" + }, + { + "entity_id": "1", + "clientPort": "2191", + "dataDir": "/tmp/zookeeper_1", + "log_filename": "zookeeper_1.log", + "config_filename": "zookeeper_1.properties" + }, + + + { + "entity_id": "2", + "port": "9091", + "brokerid": "1", + "log.file.size": "51200", + "log.dir": "/tmp/kafka_server_2_logs", + "log_filename": "kafka_server_2.log", + "config_filename": "kafka_server_2.properties" + }, + { + "entity_id": "3", + "port": "9092", + "brokerid": "2", + "log.file.size": "51200", + "log.dir": "/tmp/kafka_server_3_logs", + "log_filename": "kafka_server_3.log", + "config_filename": "kafka_server_3.properties" + }, + { + "entity_id": "4", + "port": "9093", + "brokerid": "3", + "log.file.size": "51200", + "log.dir": "/tmp/kafka_server_4_logs", + "log_filename": "kafka_server_4.log", + "config_filename": "kafka_server_4.properties" + }, + + + { + "entity_id": "5", + "port": "9094", + "brokerid": "4", + "log.file.size": "51200", + "log.dir": "/tmp/kafka_server_5_logs", + "log_filename": "kafka_server_5.log", + "config_filename": "kafka_server_5.properties" 
+ }, + { + "entity_id": "6", + "port": "9095", + "brokerid": "5", + "log.file.size": "51200", + "log.dir": "/tmp/kafka_server_6_logs", + "log_filename": "kafka_server_6.log", + "config_filename": "kafka_server_6.properties" + }, + { + "entity_id": "7", + "port": "9096", + "brokerid": "6", + "log.file.size": "51200", + "log.dir": "/tmp/kafka_server_7_logs", + "log_filename": "kafka_server_7.log", + "config_filename": "kafka_server_7.properties" + }, + + + { + "entity_id": "8", + "topic": "test_1", + "threads": "5", + "compression-codec": "1", + "message-size": "500", + "message": "500", + "request-num-acks": "1", + "async": "true", + "log_filename": "producer_performance_8.log", + "config_filename": "producer_performance_8.properties" + }, + { + "entity_id": "9", + "topic": "test_2", + "threads": "5", + "compression-codec": "1", + "message-size": "500", + "message": "500", + "request-num-acks": "1", + "async": "true", + "log_filename": "producer_performance_9.log", + "config_filename": "producer_performance_9.properties" + }, + + + { + "entity_id": "10", + "topic": "test_1", + "groupid": "mytestgroup", + "consumer-timeout-ms": "10000", + "log_filename": "console_consumer_10.log", + "formatter": "kafka.consumer.ChecksumMessageFormatter", + "config_filename": "console_consumer_10.properties" + }, + { + "entity_id": "11", + "topic": "test_2", + "groupid": "mytestgroup", + "consumer-timeout-ms": "10000", + "log_filename": "console_consumer_11.log", + "formatter": "kafka.consumer.ChecksumMessageFormatter", + "config_filename": "console_consumer_11.properties" + }, + + + { + "entity_id": "12", + "whitelist": ".*", + "num.producers": "2", + "num.streams": "2", + "producer.config": "migration_tool_testsuite/config/migration_producer.properties", + "consumer.config": "migration_tool_testsuite/config/migration_consumer.properties", + "zkclient.01.jar": "migration_tool_testsuite/0.7/lib/zkclient-0.1.jar", + "kafka.07.jar" : "migration_tool_testsuite/0.7/lib/kafka-0.7.0.jar", + "log_filename": "migration_tool_12.log", + "config_filename": "migration_tool_12.properties" + }, + { + "entity_id": "13", + "whitelist": ".*", + "num.producers": "2", + "num.streams": "2", + "producer.config": "migration_tool_testsuite/config/migration_producer.properties", + "consumer.config": "migration_tool_testsuite/config/migration_consumer.properties", + "zkclient.01.jar": "migration_tool_testsuite/0.7/lib/zkclient-0.1.jar", + "kafka.07.jar" : "migration_tool_testsuite/0.7/lib/kafka-0.7.0.jar", + "log_filename": "migration_tool_13.log", + "config_filename": "migration_tool_13.properties" + } + ] +} Index: system_test/mirror_maker_testsuite/mirror_maker_test.py =================================================================== --- system_test/mirror_maker_testsuite/mirror_maker_test.py (revision 1406271) +++ system_test/mirror_maker_testsuite/mirror_maker_test.py (working copy) @@ -191,6 +191,12 @@ # ============================================= i = 1 numIterations = int(self.testcaseEnv.testcaseArgumentsDict["num_iteration"]) + bouncedEntityDownTimeSec = 1 + try: + bouncedEntityDownTimeSec = int(self.testcaseEnv.testcaseArgumentsDict["bounced_entity_downtime_sec"]) + except: + pass + while i <= numIterations: self.log_message("Iteration " + str(i) + " of " + str(numIterations)) @@ -202,18 +208,20 @@ self.log_message("bounce_mirror_maker flag : " + bounceMirrorMaker) if (bounceMirrorMaker.lower() == "true"): - clusterConfigList = self.systemTestEnv.clusterEntityConfigDictList - mirrorMakerEntityIdList = 
system_test_utils.get_data_from_list_of_dicts( - clusterConfigList, "role", "mirror_maker", "entity_id") + clusterConfigList = self.systemTestEnv.clusterEntityConfigDictList + mirrorMakerEntityIdList = system_test_utils.get_data_from_list_of_dicts( + clusterConfigList, "role", "mirror_maker", "entity_id") + stoppedMirrorMakerEntityId = mirrorMakerEntityIdList[0] - mirrorMakerPPid = self.testcaseEnv.entityMirrorMakerParentPidDict[mirrorMakerEntityIdList[0]] + mirrorMakerPPid = self.testcaseEnv.entityMirrorMakerParentPidDict[stoppedMirrorMakerEntityId] self.log_message("stopping mirror maker : " + mirrorMakerPPid) - kafka_system_test_utils.stop_remote_entity(self.systemTestEnv, mirrorMakerEntityIdList[0], mirrorMakerPPid) - time.sleep(1) + kafka_system_test_utils.stop_remote_entity(self.systemTestEnv, stoppedMirrorMakerEntityId, mirrorMakerPPid) + self.anonLogger.info("sleeping for " + str(bouncedEntityDownTimeSec) + " sec") + time.sleep(bouncedEntityDownTimeSec) # starting previously terminated broker self.log_message("starting the previously terminated mirror maker") - kafka_system_test_utils.start_mirror_makers(self.systemTestEnv, self.testcaseEnv) + kafka_system_test_utils.start_mirror_makers(self.systemTestEnv, self.testcaseEnv, stoppedMirrorMakerEntityId) self.anonLogger.info("sleeping for 15s") time.sleep(15) Index: system_test/mirror_maker_testsuite/testcase_5003/cluster_config.json =================================================================== --- system_test/mirror_maker_testsuite/testcase_5003/cluster_config.json (revision 0) +++ system_test/mirror_maker_testsuite/testcase_5003/cluster_config.json (revision 0) @@ -0,0 +1,135 @@ +{ + "cluster_config": [ + { + "entity_id": "0", + "hostname": "localhost", + "role": "zookeeper", + "cluster_name":"source", + "kafka_home": "default", + "java_home": "default", + "jmx_port": "9100" + }, + { + "entity_id": "1", + "hostname": "localhost", + "role": "zookeeper", + "cluster_name":"source", + "kafka_home": "default", + "java_home": "default", + "jmx_port": "9101" + }, + + { + "entity_id": "2", + "hostname": "localhost", + "role": "zookeeper", + "cluster_name":"target", + "kafka_home": "default", + "java_home": "default", + "jmx_port": "9102" + }, + { + "entity_id": "3", + "hostname": "localhost", + "role": "zookeeper", + "cluster_name":"target", + "kafka_home": "default", + "java_home": "default", + "jmx_port": "9103" + }, + + { + "entity_id": "4", + "hostname": "localhost", + "role": "broker", + "cluster_name":"source", + "kafka_home": "default", + "java_home": "default", + "jmx_port": "9104" + }, + { + "entity_id": "5", + "hostname": "localhost", + "role": "broker", + "cluster_name":"source", + "kafka_home": "default", + "java_home": "default", + "jmx_port": "9105" + }, + { + "entity_id": "6", + "hostname": "localhost", + "role": "broker", + "cluster_name":"source", + "kafka_home": "default", + "java_home": "default", + "jmx_port": "9106" + }, + + { + "entity_id": "7", + "hostname": "localhost", + "role": "broker", + "cluster_name":"target", + "kafka_home": "default", + "java_home": "default", + "jmx_port": "9107" + }, + { + "entity_id": "8", + "hostname": "localhost", + "role": "broker", + "cluster_name":"target", + "kafka_home": "default", + "java_home": "default", + "jmx_port": "9108" + }, + { + "entity_id": "9", + "hostname": "localhost", + "role": "broker", + "cluster_name":"target", + "kafka_home": "default", + "java_home": "default", + "jmx_port": "9109" + }, + + { + "entity_id": "10", + "hostname": "localhost", + "role": 
"producer_performance", + "cluster_name":"source", + "kafka_home": "default", + "java_home": "default", + "jmx_port": "9110" + }, + { + "entity_id": "11", + "hostname": "localhost", + "role": "console_consumer", + "cluster_name":"target", + "kafka_home": "default", + "java_home": "default", + "jmx_port": "9111" + }, + + { + "entity_id": "12", + "hostname": "localhost", + "role": "mirror_maker", + "cluster_name":"target", + "kafka_home": "default", + "java_home": "default", + "jmx_port": "9112" + }, + { + "entity_id": "13", + "hostname": "localhost", + "role": "mirror_maker", + "cluster_name":"target", + "kafka_home": "default", + "java_home": "default", + "jmx_port": "9113" + } + ] +} Index: system_test/mirror_maker_testsuite/testcase_5003/testcase_5003_properties.json =================================================================== --- system_test/mirror_maker_testsuite/testcase_5003/testcase_5003_properties.json (revision 0) +++ system_test/mirror_maker_testsuite/testcase_5003/testcase_5003_properties.json (revision 0) @@ -0,0 +1,143 @@ +{ + "description": {"01":"Replication with Mirror Maker => Bounce Mirror Maker", + "02":"Set up 2 clusters such as : SOURCE => MirrorMaker => TARGET", + "03":"Set up 2-node Zk cluster for both SOURCE & TARGET", + "04":"Produce and consume messages to a single topic - single partition.", + "05":"This test sends messages to 3 replicas", + "06":"At the end it verifies the log size and contents", + "07":"Use a consumer to verify no message loss in TARGET cluster.", + "08":"Producer dimensions : mode:async, acks:-1, comp:1", + "09":"Log segment size : 10240" + }, + "testcase_args": { + "bounce_leader": "false", + "bounce_mirror_maker": "true", + "bounced_entity_downtime_sec": "30", + "replica_factor": "3", + "num_partition": "1", + "num_iteration": "1", + "sleep_seconds_between_producer_calls": "1", + "message_producing_free_time_sec": "15", + "num_messages_to_produce_per_producer_call": "50" + }, + "entities": [ + { + "entity_id": "0", + "clientPort": "2108", + "dataDir": "/tmp/zookeeper_0", + "log_filename": "zookeeper_0.log", + "config_filename": "zookeeper_0.properties" + }, + { + "entity_id": "1", + "clientPort": "2118", + "dataDir": "/tmp/zookeeper_1", + "log_filename": "zookeeper_1.log", + "config_filename": "zookeeper_1.properties" + }, + + { + "entity_id": "2", + "clientPort": "2128", + "dataDir": "/tmp/zookeeper_2", + "log_filename": "zookeeper_2.log", + "config_filename": "zookeeper_2.properties" + }, + { + "entity_id": "3", + "clientPort": "2138", + "dataDir": "/tmp/zookeeper_3", + "log_filename": "zookeeper_3.log", + "config_filename": "zookeeper_3.properties" + }, + + { + "entity_id": "4", + "port": "9091", + "brokerid": "1", + "log.file.size": "10240", + "log.dir": "/tmp/kafka_server_4_logs", + "log_filename": "kafka_server_4.log", + "config_filename": "kafka_server_4.properties" + }, + { + "entity_id": "5", + "port": "9092", + "brokerid": "2", + "log.file.size": "10240", + "log.dir": "/tmp/kafka_server_5_logs", + "log_filename": "kafka_server_5.log", + "config_filename": "kafka_server_5.properties" + }, + { + "entity_id": "6", + "port": "9093", + "brokerid": "3", + "log.file.size": "10240", + "log.dir": "/tmp/kafka_server_6_logs", + "log_filename": "kafka_server_6.log", + "config_filename": "kafka_server_6.properties" + }, + { + "entity_id": "7", + "port": "9094", + "brokerid": "4", + "log.file.size": "10240", + "log.dir": "/tmp/kafka_server_7_logs", + "log_filename": "kafka_server_7.log", + "config_filename": "kafka_server_7.properties" + 
}, + { + "entity_id": "8", + "port": "9095", + "brokerid": "5", + "log.file.size": "10240", + "log.dir": "/tmp/kafka_server_8_logs", + "log_filename": "kafka_server_8.log", + "config_filename": "kafka_server_8.properties" + }, + { + "entity_id": "9", + "port": "9096", + "brokerid": "6", + "log.file.size": "10240", + "log.dir": "/tmp/kafka_server_9_logs", + "log_filename": "kafka_server_9.log", + "config_filename": "kafka_server_9.properties" + }, + + { + "entity_id": "10", + "topic": "test_1", + "threads": "5", + "compression-codec": "1", + "message-size": "500", + "message": "100", + "request-num-acks": "-1", + "sync":"false", + "log_filename": "producer_performance_10.log", + "config_filename": "producer_performance_10.properties" + }, + { + "entity_id": "11", + "topic": "test_1", + "groupid": "mytestgroup", + "consumer-timeout-ms": "10000", + "log_filename": "console_consumer_11.log", + "config_filename": "console_consumer_11.properties" + }, + + { + "entity_id": "12", + "log_filename": "mirror_maker_12.log", + "mirror_consumer_config_filename": "mirror_consumer_12.properties", + "mirror_producer_config_filename": "mirror_producer_12.properties" + }, + { + "entity_id": "13", + "log_filename": "mirror_maker_13.log", + "mirror_consumer_config_filename": "mirror_consumer_13.properties", + "mirror_producer_config_filename": "mirror_producer_13.properties" + } + ] +} Index: system_test/mirror_maker_testsuite/testcase_5004/cluster_config.json =================================================================== --- system_test/mirror_maker_testsuite/testcase_5004/cluster_config.json (revision 0) +++ system_test/mirror_maker_testsuite/testcase_5004/cluster_config.json (revision 0) @@ -0,0 +1,135 @@ +{ + "cluster_config": [ + { + "entity_id": "0", + "hostname": "localhost", + "role": "zookeeper", + "cluster_name":"source", + "kafka_home": "default", + "java_home": "default", + "jmx_port": "9100" + }, + { + "entity_id": "1", + "hostname": "localhost", + "role": "zookeeper", + "cluster_name":"source", + "kafka_home": "default", + "java_home": "default", + "jmx_port": "9101" + }, + + { + "entity_id": "2", + "hostname": "localhost", + "role": "zookeeper", + "cluster_name":"target", + "kafka_home": "default", + "java_home": "default", + "jmx_port": "9102" + }, + { + "entity_id": "3", + "hostname": "localhost", + "role": "zookeeper", + "cluster_name":"target", + "kafka_home": "default", + "java_home": "default", + "jmx_port": "9103" + }, + + { + "entity_id": "4", + "hostname": "localhost", + "role": "broker", + "cluster_name":"source", + "kafka_home": "default", + "java_home": "default", + "jmx_port": "9104" + }, + { + "entity_id": "5", + "hostname": "localhost", + "role": "broker", + "cluster_name":"source", + "kafka_home": "default", + "java_home": "default", + "jmx_port": "9105" + }, + { + "entity_id": "6", + "hostname": "localhost", + "role": "broker", + "cluster_name":"source", + "kafka_home": "default", + "java_home": "default", + "jmx_port": "9106" + }, + + { + "entity_id": "7", + "hostname": "localhost", + "role": "broker", + "cluster_name":"target", + "kafka_home": "default", + "java_home": "default", + "jmx_port": "9107" + }, + { + "entity_id": "8", + "hostname": "localhost", + "role": "broker", + "cluster_name":"target", + "kafka_home": "default", + "java_home": "default", + "jmx_port": "9108" + }, + { + "entity_id": "9", + "hostname": "localhost", + "role": "broker", + "cluster_name":"target", + "kafka_home": "default", + "java_home": "default", + "jmx_port": "9109" + }, + + { + 
"entity_id": "10", + "hostname": "localhost", + "role": "producer_performance", + "cluster_name":"source", + "kafka_home": "default", + "java_home": "default", + "jmx_port": "9110" + }, + { + "entity_id": "11", + "hostname": "localhost", + "role": "console_consumer", + "cluster_name":"target", + "kafka_home": "default", + "java_home": "default", + "jmx_port": "9111" + }, + + { + "entity_id": "12", + "hostname": "localhost", + "role": "mirror_maker", + "cluster_name":"target", + "kafka_home": "default", + "java_home": "default", + "jmx_port": "9112" + }, + { + "entity_id": "13", + "hostname": "localhost", + "role": "mirror_maker", + "cluster_name":"target", + "kafka_home": "default", + "java_home": "default", + "jmx_port": "9113" + } + ] +} Index: system_test/mirror_maker_testsuite/testcase_5004/testcase_5004_properties.json =================================================================== --- system_test/mirror_maker_testsuite/testcase_5004/testcase_5004_properties.json (revision 0) +++ system_test/mirror_maker_testsuite/testcase_5004/testcase_5004_properties.json (revision 0) @@ -0,0 +1,143 @@ +{ + "description": {"01":"Replication with Mirror Maker => Bounce Mirror Maker", + "02":"Set up 2 clusters such as : SOURCE => MirrorMaker => TARGET", + "03":"Set up 2-node Zk cluster for both SOURCE & TARGET", + "04":"Produce and consume messages to a single topic - single partition.", + "05":"This test sends messages to 3 replicas", + "06":"At the end it verifies the log size and contents", + "07":"Use a consumer to verify no message loss in TARGET cluster.", + "08":"Producer dimensions : mode:async, acks:1, comp:1", + "09":"Log segment size : 10240" + }, + "testcase_args": { + "bounce_leader": "false", + "bounce_mirror_maker": "true", + "bounced_entity_downtime_sec": "30", + "replica_factor": "3", + "num_partition": "1", + "num_iteration": "1", + "sleep_seconds_between_producer_calls": "1", + "message_producing_free_time_sec": "15", + "num_messages_to_produce_per_producer_call": "50" + }, + "entities": [ + { + "entity_id": "0", + "clientPort": "2108", + "dataDir": "/tmp/zookeeper_0", + "log_filename": "zookeeper_0.log", + "config_filename": "zookeeper_0.properties" + }, + { + "entity_id": "1", + "clientPort": "2118", + "dataDir": "/tmp/zookeeper_1", + "log_filename": "zookeeper_1.log", + "config_filename": "zookeeper_1.properties" + }, + + { + "entity_id": "2", + "clientPort": "2128", + "dataDir": "/tmp/zookeeper_2", + "log_filename": "zookeeper_2.log", + "config_filename": "zookeeper_2.properties" + }, + { + "entity_id": "3", + "clientPort": "2138", + "dataDir": "/tmp/zookeeper_3", + "log_filename": "zookeeper_3.log", + "config_filename": "zookeeper_3.properties" + }, + + { + "entity_id": "4", + "port": "9091", + "brokerid": "1", + "log.file.size": "10240", + "log.dir": "/tmp/kafka_server_4_logs", + "log_filename": "kafka_server_4.log", + "config_filename": "kafka_server_4.properties" + }, + { + "entity_id": "5", + "port": "9092", + "brokerid": "2", + "log.file.size": "10240", + "log.dir": "/tmp/kafka_server_5_logs", + "log_filename": "kafka_server_5.log", + "config_filename": "kafka_server_5.properties" + }, + { + "entity_id": "6", + "port": "9093", + "brokerid": "3", + "log.file.size": "10240", + "log.dir": "/tmp/kafka_server_6_logs", + "log_filename": "kafka_server_6.log", + "config_filename": "kafka_server_6.properties" + }, + { + "entity_id": "7", + "port": "9094", + "brokerid": "4", + "log.file.size": "10240", + "log.dir": "/tmp/kafka_server_7_logs", + "log_filename": 
"kafka_server_7.log", + "config_filename": "kafka_server_7.properties" + }, + { + "entity_id": "8", + "port": "9095", + "brokerid": "5", + "log.file.size": "10240", + "log.dir": "/tmp/kafka_server_8_logs", + "log_filename": "kafka_server_8.log", + "config_filename": "kafka_server_8.properties" + }, + { + "entity_id": "9", + "port": "9096", + "brokerid": "6", + "log.file.size": "10240", + "log.dir": "/tmp/kafka_server_9_logs", + "log_filename": "kafka_server_9.log", + "config_filename": "kafka_server_9.properties" + }, + + { + "entity_id": "10", + "topic": "test_1", + "threads": "5", + "compression-codec": "1", + "message-size": "500", + "message": "100", + "request-num-acks": "1", + "sync":"false", + "log_filename": "producer_performance_10.log", + "config_filename": "producer_performance_10.properties" + }, + { + "entity_id": "11", + "topic": "test_1", + "groupid": "mytestgroup", + "consumer-timeout-ms": "10000", + "log_filename": "console_consumer_11.log", + "config_filename": "console_consumer_11.properties" + }, + + { + "entity_id": "12", + "log_filename": "mirror_maker_12.log", + "mirror_consumer_config_filename": "mirror_consumer_12.properties", + "mirror_producer_config_filename": "mirror_producer_12.properties" + }, + { + "entity_id": "13", + "log_filename": "mirror_maker_13.log", + "mirror_consumer_config_filename": "mirror_consumer_13.properties", + "mirror_producer_config_filename": "mirror_producer_13.properties" + } + ] +} Index: system_test/mirror_maker_testsuite/testcase_5005/cluster_config.json =================================================================== --- system_test/mirror_maker_testsuite/testcase_5005/cluster_config.json (revision 0) +++ system_test/mirror_maker_testsuite/testcase_5005/cluster_config.json (revision 0) @@ -0,0 +1,153 @@ +{ + "cluster_config": [ + { + "entity_id": "0", + "hostname": "localhost", + "role": "zookeeper", + "cluster_name":"source", + "kafka_home": "default", + "java_home": "default", + "jmx_port": "9100" + }, + { + "entity_id": "1", + "hostname": "localhost", + "role": "zookeeper", + "cluster_name":"source", + "kafka_home": "default", + "java_home": "default", + "jmx_port": "9101" + }, + + { + "entity_id": "2", + "hostname": "localhost", + "role": "zookeeper", + "cluster_name":"target", + "kafka_home": "default", + "java_home": "default", + "jmx_port": "9102" + }, + { + "entity_id": "3", + "hostname": "localhost", + "role": "zookeeper", + "cluster_name":"target", + "kafka_home": "default", + "java_home": "default", + "jmx_port": "9103" + }, + + { + "entity_id": "4", + "hostname": "localhost", + "role": "broker", + "cluster_name":"source", + "kafka_home": "default", + "java_home": "default", + "jmx_port": "9104" + }, + { + "entity_id": "5", + "hostname": "localhost", + "role": "broker", + "cluster_name":"source", + "kafka_home": "default", + "java_home": "default", + "jmx_port": "9105" + }, + { + "entity_id": "6", + "hostname": "localhost", + "role": "broker", + "cluster_name":"source", + "kafka_home": "default", + "java_home": "default", + "jmx_port": "9106" + }, + + { + "entity_id": "7", + "hostname": "localhost", + "role": "broker", + "cluster_name":"target", + "kafka_home": "default", + "java_home": "default", + "jmx_port": "9107" + }, + { + "entity_id": "8", + "hostname": "localhost", + "role": "broker", + "cluster_name":"target", + "kafka_home": "default", + "java_home": "default", + "jmx_port": "9108" + }, + { + "entity_id": "9", + "hostname": "localhost", + "role": "broker", + "cluster_name":"target", + "kafka_home": "default", 
+ "java_home": "default", + "jmx_port": "9109" + }, + + { + "entity_id": "10", + "hostname": "localhost", + "role": "producer_performance", + "cluster_name":"source", + "kafka_home": "default", + "java_home": "default", + "jmx_port": "9110" + }, + { + "entity_id": "11", + "hostname": "localhost", + "role": "producer_performance", + "cluster_name":"source", + "kafka_home": "default", + "java_home": "default", + "jmx_port": "9111" + }, + { + "entity_id": "12", + "hostname": "localhost", + "role": "console_consumer", + "cluster_name":"target", + "kafka_home": "default", + "java_home": "default", + "jmx_port": "9112" + }, + { + "entity_id": "13", + "hostname": "localhost", + "role": "console_consumer", + "cluster_name":"target", + "kafka_home": "default", + "java_home": "default", + "jmx_port": "9113" + }, + + { + "entity_id": "14", + "hostname": "localhost", + "role": "mirror_maker", + "cluster_name":"target", + "kafka_home": "default", + "java_home": "default", + "jmx_port": "9114" + }, + { + "entity_id": "15", + "hostname": "localhost", + "role": "mirror_maker", + "cluster_name":"target", + "kafka_home": "default", + "java_home": "default", + "jmx_port": "9115" + } + ] +} Index: system_test/mirror_maker_testsuite/testcase_5005/testcase_5005_properties.json =================================================================== --- system_test/mirror_maker_testsuite/testcase_5005/testcase_5005_properties.json (revision 0) +++ system_test/mirror_maker_testsuite/testcase_5005/testcase_5005_properties.json (revision 0) @@ -0,0 +1,164 @@ +{ + "description": {"01":"Replication with Mirror Maker => Bounce Mirror Maker", + "02":"Set up 2 clusters such as : SOURCE => MirrorMaker => TARGET", + "03":"Set up 2-node Zk cluster for both SOURCE & TARGET", + "04":"Produce and consume messages to 2 topics - 2 partitions.", + "05":"This test sends messages to 3 replicas", + "06":"At the end it verifies the log size and contents", + "07":"Use a consumer to verify no message loss in TARGET cluster.", + "08":"Producer dimensions : mode:async, acks:-1, comp:1", + "09":"Log segment size : 10240" + }, + "testcase_args": { + "bounce_leader": "false", + "bounce_mirror_maker": "true", + "bounced_entity_downtime_sec": "30", + "replica_factor": "3", + "num_partition": "2", + "num_iteration": "1", + "sleep_seconds_between_producer_calls": "1", + "message_producing_free_time_sec": "15", + "num_messages_to_produce_per_producer_call": "50" + }, + "entities": [ + { + "entity_id": "0", + "clientPort": "2108", + "dataDir": "/tmp/zookeeper_0", + "log_filename": "zookeeper_0.log", + "config_filename": "zookeeper_0.properties" + }, + { + "entity_id": "1", + "clientPort": "2118", + "dataDir": "/tmp/zookeeper_1", + "log_filename": "zookeeper_1.log", + "config_filename": "zookeeper_1.properties" + }, + + { + "entity_id": "2", + "clientPort": "2128", + "dataDir": "/tmp/zookeeper_2", + "log_filename": "zookeeper_2.log", + "config_filename": "zookeeper_2.properties" + }, + { + "entity_id": "3", + "clientPort": "2138", + "dataDir": "/tmp/zookeeper_3", + "log_filename": "zookeeper_3.log", + "config_filename": "zookeeper_3.properties" + }, + + { + "entity_id": "4", + "port": "9091", + "brokerid": "1", + "log.file.size": "10240", + "log.dir": "/tmp/kafka_server_4_logs", + "log_filename": "kafka_server_4.log", + "config_filename": "kafka_server_4.properties" + }, + { + "entity_id": "5", + "port": "9092", + "brokerid": "2", + "log.file.size": "10240", + "log.dir": "/tmp/kafka_server_5_logs", + "log_filename": "kafka_server_5.log", + 
"config_filename": "kafka_server_5.properties" + }, + { + "entity_id": "6", + "port": "9093", + "brokerid": "3", + "log.file.size": "10240", + "log.dir": "/tmp/kafka_server_6_logs", + "log_filename": "kafka_server_6.log", + "config_filename": "kafka_server_6.properties" + }, + { + "entity_id": "7", + "port": "9094", + "brokerid": "4", + "log.file.size": "10240", + "log.dir": "/tmp/kafka_server_7_logs", + "log_filename": "kafka_server_7.log", + "config_filename": "kafka_server_7.properties" + }, + { + "entity_id": "8", + "port": "9095", + "brokerid": "5", + "log.file.size": "10240", + "log.dir": "/tmp/kafka_server_8_logs", + "log_filename": "kafka_server_8.log", + "config_filename": "kafka_server_8.properties" + }, + { + "entity_id": "9", + "port": "9096", + "brokerid": "6", + "log.file.size": "10240", + "log.dir": "/tmp/kafka_server_9_logs", + "log_filename": "kafka_server_9.log", + "config_filename": "kafka_server_9.properties" + }, + + { + "entity_id": "10", + "topic": "test_1", + "threads": "5", + "compression-codec": "1", + "message-size": "500", + "message": "100", + "request-num-acks": "-1", + "sync":"false", + "log_filename": "producer_performance_10.log", + "config_filename": "producer_performance_10.properties" + }, + { + "entity_id": "11", + "topic": "test_2", + "threads": "5", + "compression-codec": "1", + "message-size": "500", + "message": "100", + "request-num-acks": "-1", + "sync":"false", + "log_filename": "producer_performance_11.log", + "config_filename": "producer_performance_11.properties" + }, + + { + "entity_id": "12", + "topic": "test_1", + "groupid": "mytestgroup", + "consumer-timeout-ms": "10000", + "log_filename": "console_consumer_12.log", + "config_filename": "console_consumer_12.properties" + }, + { + "entity_id": "13", + "topic": "test_2", + "groupid": "mytestgroup", + "consumer-timeout-ms": "10000", + "log_filename": "console_consumer_13.log", + "config_filename": "console_consumer_13.properties" + }, + + { + "entity_id": "14", + "log_filename": "mirror_maker_14.log", + "mirror_consumer_config_filename": "mirror_consumer_14.properties", + "mirror_producer_config_filename": "mirror_producer_14.properties" + }, + { + "entity_id": "15", + "log_filename": "mirror_maker_15.log", + "mirror_consumer_config_filename": "mirror_consumer_15.properties", + "mirror_producer_config_filename": "mirror_producer_15.properties" + } + ] +} Index: system_test/mirror_maker_testsuite/testcase_5006/cluster_config.json =================================================================== --- system_test/mirror_maker_testsuite/testcase_5006/cluster_config.json (revision 0) +++ system_test/mirror_maker_testsuite/testcase_5006/cluster_config.json (revision 0) @@ -0,0 +1,153 @@ +{ + "cluster_config": [ + { + "entity_id": "0", + "hostname": "localhost", + "role": "zookeeper", + "cluster_name":"source", + "kafka_home": "default", + "java_home": "default", + "jmx_port": "9100" + }, + { + "entity_id": "1", + "hostname": "localhost", + "role": "zookeeper", + "cluster_name":"source", + "kafka_home": "default", + "java_home": "default", + "jmx_port": "9101" + }, + + { + "entity_id": "2", + "hostname": "localhost", + "role": "zookeeper", + "cluster_name":"target", + "kafka_home": "default", + "java_home": "default", + "jmx_port": "9102" + }, + { + "entity_id": "3", + "hostname": "localhost", + "role": "zookeeper", + "cluster_name":"target", + "kafka_home": "default", + "java_home": "default", + "jmx_port": "9103" + }, + + { + "entity_id": "4", + "hostname": "localhost", + "role": "broker", + 
"cluster_name":"source", + "kafka_home": "default", + "java_home": "default", + "jmx_port": "9104" + }, + { + "entity_id": "5", + "hostname": "localhost", + "role": "broker", + "cluster_name":"source", + "kafka_home": "default", + "java_home": "default", + "jmx_port": "9105" + }, + { + "entity_id": "6", + "hostname": "localhost", + "role": "broker", + "cluster_name":"source", + "kafka_home": "default", + "java_home": "default", + "jmx_port": "9106" + }, + + { + "entity_id": "7", + "hostname": "localhost", + "role": "broker", + "cluster_name":"target", + "kafka_home": "default", + "java_home": "default", + "jmx_port": "9107" + }, + { + "entity_id": "8", + "hostname": "localhost", + "role": "broker", + "cluster_name":"target", + "kafka_home": "default", + "java_home": "default", + "jmx_port": "9108" + }, + { + "entity_id": "9", + "hostname": "localhost", + "role": "broker", + "cluster_name":"target", + "kafka_home": "default", + "java_home": "default", + "jmx_port": "9109" + }, + + { + "entity_id": "10", + "hostname": "localhost", + "role": "producer_performance", + "cluster_name":"source", + "kafka_home": "default", + "java_home": "default", + "jmx_port": "9110" + }, + { + "entity_id": "11", + "hostname": "localhost", + "role": "producer_performance", + "cluster_name":"source", + "kafka_home": "default", + "java_home": "default", + "jmx_port": "9111" + }, + { + "entity_id": "12", + "hostname": "localhost", + "role": "console_consumer", + "cluster_name":"target", + "kafka_home": "default", + "java_home": "default", + "jmx_port": "9112" + }, + { + "entity_id": "13", + "hostname": "localhost", + "role": "console_consumer", + "cluster_name":"target", + "kafka_home": "default", + "java_home": "default", + "jmx_port": "9113" + }, + + { + "entity_id": "14", + "hostname": "localhost", + "role": "mirror_maker", + "cluster_name":"target", + "kafka_home": "default", + "java_home": "default", + "jmx_port": "9114" + }, + { + "entity_id": "15", + "hostname": "localhost", + "role": "mirror_maker", + "cluster_name":"target", + "kafka_home": "default", + "java_home": "default", + "jmx_port": "9115" + } + ] +} Index: system_test/mirror_maker_testsuite/testcase_5006/testcase_5006_properties.json =================================================================== --- system_test/mirror_maker_testsuite/testcase_5006/testcase_5006_properties.json (revision 0) +++ system_test/mirror_maker_testsuite/testcase_5006/testcase_5006_properties.json (revision 0) @@ -0,0 +1,164 @@ +{ + "description": {"01":"Replication with Mirror Maker => Bounce Mirror Maker", + "02":"Set up 2 clusters such as : SOURCE => MirrorMaker => TARGET", + "03":"Set up 2-node Zk cluster for both SOURCE & TARGET", + "04":"Produce and consume messages to 2 topics - 2 partitions.", + "05":"This test sends messages to 3 replicas", + "06":"At the end it verifies the log size and contents", + "07":"Use a consumer to verify no message loss in TARGET cluster.", + "08":"Producer dimensions : mode:async, acks:1, comp:1", + "09":"Log segment size : 10240" + }, + "testcase_args": { + "bounce_leader": "false", + "bounce_mirror_maker": "true", + "bounced_entity_downtime_sec": "30", + "replica_factor": "3", + "num_partition": "2", + "num_iteration": "1", + "sleep_seconds_between_producer_calls": "1", + "message_producing_free_time_sec": "15", + "num_messages_to_produce_per_producer_call": "50" + }, + "entities": [ + { + "entity_id": "0", + "clientPort": "2108", + "dataDir": "/tmp/zookeeper_0", + "log_filename": "zookeeper_0.log", + "config_filename": 
"zookeeper_0.properties" + }, + { + "entity_id": "1", + "clientPort": "2118", + "dataDir": "/tmp/zookeeper_1", + "log_filename": "zookeeper_1.log", + "config_filename": "zookeeper_1.properties" + }, + + { + "entity_id": "2", + "clientPort": "2128", + "dataDir": "/tmp/zookeeper_2", + "log_filename": "zookeeper_2.log", + "config_filename": "zookeeper_2.properties" + }, + { + "entity_id": "3", + "clientPort": "2138", + "dataDir": "/tmp/zookeeper_3", + "log_filename": "zookeeper_3.log", + "config_filename": "zookeeper_3.properties" + }, + + { + "entity_id": "4", + "port": "9091", + "brokerid": "1", + "log.file.size": "10240", + "log.dir": "/tmp/kafka_server_4_logs", + "log_filename": "kafka_server_4.log", + "config_filename": "kafka_server_4.properties" + }, + { + "entity_id": "5", + "port": "9092", + "brokerid": "2", + "log.file.size": "10240", + "log.dir": "/tmp/kafka_server_5_logs", + "log_filename": "kafka_server_5.log", + "config_filename": "kafka_server_5.properties" + }, + { + "entity_id": "6", + "port": "9093", + "brokerid": "3", + "log.file.size": "10240", + "log.dir": "/tmp/kafka_server_6_logs", + "log_filename": "kafka_server_6.log", + "config_filename": "kafka_server_6.properties" + }, + { + "entity_id": "7", + "port": "9094", + "brokerid": "4", + "log.file.size": "10240", + "log.dir": "/tmp/kafka_server_7_logs", + "log_filename": "kafka_server_7.log", + "config_filename": "kafka_server_7.properties" + }, + { + "entity_id": "8", + "port": "9095", + "brokerid": "5", + "log.file.size": "10240", + "log.dir": "/tmp/kafka_server_8_logs", + "log_filename": "kafka_server_8.log", + "config_filename": "kafka_server_8.properties" + }, + { + "entity_id": "9", + "port": "9096", + "brokerid": "6", + "log.file.size": "10240", + "log.dir": "/tmp/kafka_server_9_logs", + "log_filename": "kafka_server_9.log", + "config_filename": "kafka_server_9.properties" + }, + + { + "entity_id": "10", + "topic": "test_1", + "threads": "5", + "compression-codec": "1", + "message-size": "500", + "message": "100", + "request-num-acks": "1", + "sync":"false", + "log_filename": "producer_performance_10.log", + "config_filename": "producer_performance_10.properties" + }, + { + "entity_id": "11", + "topic": "test_2", + "threads": "5", + "compression-codec": "1", + "message-size": "500", + "message": "100", + "request-num-acks": "1", + "sync":"false", + "log_filename": "producer_performance_11.log", + "config_filename": "producer_performance_11.properties" + }, + + { + "entity_id": "12", + "topic": "test_1", + "groupid": "mytestgroup", + "consumer-timeout-ms": "10000", + "log_filename": "console_consumer_12.log", + "config_filename": "console_consumer_12.properties" + }, + { + "entity_id": "13", + "topic": "test_2", + "groupid": "mytestgroup", + "consumer-timeout-ms": "10000", + "log_filename": "console_consumer_13.log", + "config_filename": "console_consumer_13.properties" + }, + + { + "entity_id": "14", + "log_filename": "mirror_maker_14.log", + "mirror_consumer_config_filename": "mirror_consumer_14.properties", + "mirror_producer_config_filename": "mirror_producer_14.properties" + }, + { + "entity_id": "15", + "log_filename": "mirror_maker_15.log", + "mirror_consumer_config_filename": "mirror_consumer_15.properties", + "mirror_producer_config_filename": "mirror_producer_15.properties" + } + ] +} Index: system_test/replication_testsuite/testcase_0124/cluster_config.json =================================================================== --- system_test/replication_testsuite/testcase_0124/cluster_config.json (revision 
0) +++ system_test/replication_testsuite/testcase_0124/cluster_config.json (revision 0) @@ -0,0 +1,76 @@ +{ + "cluster_config": [ + { + "entity_id": "0", + "hostname": "localhost", + "role": "zookeeper", + "cluster_name": "source", + "kafka_home": "default", + "java_home": "default", + "jmx_port": "9990" + }, + { + "entity_id": "1", + "hostname": "localhost", + "role": "broker", + "cluster_name": "source", + "kafka_home": "default", + "java_home": "default", + "jmx_port": "9991" + }, + { + "entity_id": "2", + "hostname": "localhost", + "role": "broker", + "cluster_name": "source", + "kafka_home": "default", + "java_home": "default", + "jmx_port": "9992" + }, + { + "entity_id": "3", + "hostname": "localhost", + "role": "broker", + "cluster_name": "source", + "kafka_home": "default", + "java_home": "default", + "jmx_port": "9993" + }, + { + "entity_id": "4", + "hostname": "localhost", + "role": "producer_performance", + "cluster_name": "source", + "kafka_home": "default", + "java_home": "default", + "jmx_port": "9997" + }, + { + "entity_id": "5", + "hostname": "localhost", + "role": "producer_performance", + "cluster_name": "source", + "kafka_home": "default", + "java_home": "default", + "jmx_port": "9998" + }, + { + "entity_id": "6", + "hostname": "localhost", + "role": "console_consumer", + "cluster_name": "source", + "kafka_home": "default", + "java_home": "default", + "jmx_port": "9999" + }, + { + "entity_id": "7", + "hostname": "localhost", + "role": "console_consumer", + "cluster_name": "source", + "kafka_home": "default", + "java_home": "default", + "jmx_port": "9099" + } + ] +} Index: system_test/replication_testsuite/testcase_0124/testcase_0124_properties.json =================================================================== --- system_test/replication_testsuite/testcase_0124/testcase_0124_properties.json (revision 0) +++ system_test/replication_testsuite/testcase_0124/testcase_0124_properties.json (revision 0) @@ -0,0 +1,102 @@ +{ + "description": {"01":"Leader Failure in Replication with multi topics & partitions : 1. 
log.index.interval.bytes => 490", + "02":"Setting log.index.interval.bytes to slightly smaller than message size to force indexing on each message", + "03":"Produce and consume messages to 2 topics - 3 partitions", + "04":"This test sends messages to 3 replicas", + "05":"To trigger leader election: find the leader and terminate by controlled failure (kill -15)", + "06":"Restart the terminated broker", + "07":"Lookup brokers' log4j messages and verify that leader is re-elected successfully", + "08":"At the end it verifies the log size and contents", + "09":"Use a consumer to verify no message loss.", + "10":"Producer dimensions : mode:sync, acks:-1, comp:0", + "11":"Log segment size : 102400" + }, + "testcase_args": { + "broker_type": "leader", + "bounce_broker": "true", + "replica_factor": "3", + "num_partition": "3", + "num_iteration": "3", + "sleep_seconds_between_producer_calls": "1", + "message_producing_free_time_sec": "15" + }, + "entities": [ + { + "entity_id": "0", + "clientPort": "2188", + "dataDir": "/tmp/zookeeper_0", + "log_filename": "zookeeper_2188.log", + "config_filename": "zookeeper_2188.properties" + }, + { + "entity_id": "1", + "port": "9091", + "brokerid": "1", + "log.file.size": "102400", + "log.dir": "/tmp/kafka_server_1_logs", + "log.index.interval.bytes": "490", + "log_filename": "kafka_server_9091.log", + "config_filename": "kafka_server_9091.properties" + }, + { + "entity_id": "2", + "port": "9092", + "brokerid": "2", + "log.file.size": "102400", + "log.dir": "/tmp/kafka_server_2_logs", + "log.index.interval.bytes": "490", + "log_filename": "kafka_server_9092.log", + "config_filename": "kafka_server_9092.properties" + }, + { + "entity_id": "3", + "port": "9093", + "brokerid": "3", + "log.file.size": "102400", + "log.dir": "/tmp/kafka_server_3_logs", + "log.index.interval.bytes": "490", + "log_filename": "kafka_server_9093.log", + "config_filename": "kafka_server_9093.properties" + }, + { + "entity_id": "4", + "topic": "test_1", + "threads": "5", + "compression-codec": "0", + "message-size": "500", + "message": "100", + "request-num-acks": "-1", + "sync":"true", + "log_filename": "producer_performance_4.log", + "config_filename": "producer_performance_4.properties" + }, + { + "entity_id": "5", + "topic": "test_2", + "threads": "5", + "compression-codec": "0", + "message-size": "500", + "message": "100", + "request-num-acks": "-1", + "sync":"true", + "log_filename": "producer_performance_5.log", + "config_filename": "producer_performance_5.properties" + }, + { + "entity_id": "6", + "topic": "test_1", + "groupid": "mytestgroup", + "consumer-timeout-ms": "10000", + "log_filename": "console_consumer_6.log", + "config_filename": "console_consumer_6.properties" + }, + { + "entity_id": "7", + "topic": "test_2", + "groupid": "mytestgroup", + "consumer-timeout-ms": "10000", + "log_filename": "console_consumer_7.log", + "config_filename": "console_consumer_7.properties" + } + ] +} Index: system_test/replication_testsuite/testcase_0125/cluster_config.json =================================================================== --- system_test/replication_testsuite/testcase_0125/cluster_config.json (revision 0) +++ system_test/replication_testsuite/testcase_0125/cluster_config.json (revision 0) @@ -0,0 +1,76 @@ +{ + "cluster_config": [ + { + "entity_id": "0", + "hostname": "localhost", + "role": "zookeeper", + "cluster_name": "source", + "kafka_home": "default", + "java_home": "default", + "jmx_port": "9990" + }, + { + "entity_id": "1", + "hostname": "localhost", + "role": 
"broker", + "cluster_name": "source", + "kafka_home": "default", + "java_home": "default", + "jmx_port": "9991" + }, + { + "entity_id": "2", + "hostname": "localhost", + "role": "broker", + "cluster_name": "source", + "kafka_home": "default", + "java_home": "default", + "jmx_port": "9992" + }, + { + "entity_id": "3", + "hostname": "localhost", + "role": "broker", + "cluster_name": "source", + "kafka_home": "default", + "java_home": "default", + "jmx_port": "9993" + }, + { + "entity_id": "4", + "hostname": "localhost", + "role": "producer_performance", + "cluster_name": "source", + "kafka_home": "default", + "java_home": "default", + "jmx_port": "9997" + }, + { + "entity_id": "5", + "hostname": "localhost", + "role": "producer_performance", + "cluster_name": "source", + "kafka_home": "default", + "java_home": "default", + "jmx_port": "9998" + }, + { + "entity_id": "6", + "hostname": "localhost", + "role": "console_consumer", + "cluster_name": "source", + "kafka_home": "default", + "java_home": "default", + "jmx_port": "9999" + }, + { + "entity_id": "7", + "hostname": "localhost", + "role": "console_consumer", + "cluster_name": "source", + "kafka_home": "default", + "java_home": "default", + "jmx_port": "9099" + } + ] +} Index: system_test/replication_testsuite/testcase_0125/testcase_0125_properties.json =================================================================== --- system_test/replication_testsuite/testcase_0125/testcase_0125_properties.json (revision 0) +++ system_test/replication_testsuite/testcase_0125/testcase_0125_properties.json (revision 0) @@ -0,0 +1,102 @@ +{ + "description": {"01":"Leader Failure in Replication with multi topics & partitions : 1. ack => 1", + "02":"Setting log.index.interval.bytes to slightly smaller than message size to force indexing on each message", + "03":"Produce and consume messages to 2 topics - 3 partitions", + "04":"This test sends messages to 3 replicas", + "05":"To trigger leader election: find the leader and terminate by controlled failure (kill -15)", + "06":"Restart the terminated broker", + "07":"Lookup brokers' log4j messages and verify that leader is re-elected successfully", + "08":"At the end it verifies the log size and contents", + "09":"Use a consumer to verify no message loss.", + "10":"Producer dimensions : mode:sync, acks:1, comp:0", + "11":"Log segment size : 102400" + }, + "testcase_args": { + "broker_type": "leader", + "bounce_broker": "true", + "replica_factor": "3", + "num_partition": "3", + "num_iteration": "3", + "sleep_seconds_between_producer_calls": "1", + "message_producing_free_time_sec": "15" + }, + "entities": [ + { + "entity_id": "0", + "clientPort": "2188", + "dataDir": "/tmp/zookeeper_0", + "log_filename": "zookeeper_2188.log", + "config_filename": "zookeeper_2188.properties" + }, + { + "entity_id": "1", + "port": "9091", + "brokerid": "1", + "log.file.size": "102400", + "log.dir": "/tmp/kafka_server_1_logs", + "log.index.interval.bytes": "490", + "log_filename": "kafka_server_9091.log", + "config_filename": "kafka_server_9091.properties" + }, + { + "entity_id": "2", + "port": "9092", + "brokerid": "2", + "log.file.size": "102400", + "log.dir": "/tmp/kafka_server_2_logs", + "log.index.interval.bytes": "490", + "log_filename": "kafka_server_9092.log", + "config_filename": "kafka_server_9092.properties" + }, + { + "entity_id": "3", + "port": "9093", + "brokerid": "3", + "log.file.size": "102400", + "log.dir": "/tmp/kafka_server_3_logs", + "log.index.interval.bytes": "490", + "log_filename": "kafka_server_9093.log", 
+ "config_filename": "kafka_server_9093.properties" + }, + { + "entity_id": "4", + "topic": "test_1", + "threads": "5", + "compression-codec": "0", + "message-size": "500", + "message": "100", + "request-num-acks": "1", + "sync":"true", + "log_filename": "producer_performance_4.log", + "config_filename": "producer_performance_4.properties" + }, + { + "entity_id": "5", + "topic": "test_2", + "threads": "5", + "compression-codec": "0", + "message-size": "500", + "message": "100", + "request-num-acks": "1", + "sync":"true", + "log_filename": "producer_performance_5.log", + "config_filename": "producer_performance_5.properties" + }, + { + "entity_id": "6", + "topic": "test_1", + "groupid": "mytestgroup", + "consumer-timeout-ms": "10000", + "log_filename": "console_consumer_6.log", + "config_filename": "console_consumer_6.properties" + }, + { + "entity_id": "7", + "topic": "test_2", + "groupid": "mytestgroup", + "consumer-timeout-ms": "10000", + "log_filename": "console_consumer_7.log", + "config_filename": "console_consumer_7.properties" + } + ] +} Index: system_test/replication_testsuite/testcase_0126/cluster_config.json =================================================================== --- system_test/replication_testsuite/testcase_0126/cluster_config.json (revision 0) +++ system_test/replication_testsuite/testcase_0126/cluster_config.json (revision 0) @@ -0,0 +1,76 @@ +{ + "cluster_config": [ + { + "entity_id": "0", + "hostname": "localhost", + "role": "zookeeper", + "cluster_name": "source", + "kafka_home": "default", + "java_home": "default", + "jmx_port": "9990" + }, + { + "entity_id": "1", + "hostname": "localhost", + "role": "broker", + "cluster_name": "source", + "kafka_home": "default", + "java_home": "default", + "jmx_port": "9991" + }, + { + "entity_id": "2", + "hostname": "localhost", + "role": "broker", + "cluster_name": "source", + "kafka_home": "default", + "java_home": "default", + "jmx_port": "9992" + }, + { + "entity_id": "3", + "hostname": "localhost", + "role": "broker", + "cluster_name": "source", + "kafka_home": "default", + "java_home": "default", + "jmx_port": "9993" + }, + { + "entity_id": "4", + "hostname": "localhost", + "role": "producer_performance", + "cluster_name": "source", + "kafka_home": "default", + "java_home": "default", + "jmx_port": "9997" + }, + { + "entity_id": "5", + "hostname": "localhost", + "role": "producer_performance", + "cluster_name": "source", + "kafka_home": "default", + "java_home": "default", + "jmx_port": "9998" + }, + { + "entity_id": "6", + "hostname": "localhost", + "role": "console_consumer", + "cluster_name": "source", + "kafka_home": "default", + "java_home": "default", + "jmx_port": "9999" + }, + { + "entity_id": "7", + "hostname": "localhost", + "role": "console_consumer", + "cluster_name": "source", + "kafka_home": "default", + "java_home": "default", + "jmx_port": "9099" + } + ] +} Index: system_test/replication_testsuite/testcase_0126/testcase_0126_properties.json =================================================================== --- system_test/replication_testsuite/testcase_0126/testcase_0126_properties.json (revision 0) +++ system_test/replication_testsuite/testcase_0126/testcase_0126_properties.json (revision 0) @@ -0,0 +1,102 @@ +{ + "description": {"01":"Leader Failure in Replication with multi topics & partitions : 1. ack => -1, 2. 
comp => 1", + "02":"Setting log.index.interval.bytes to slightly smaller than message size to force indexing on each message", + "03":"Produce and consume messages to 2 topics - 3 partitions", + "04":"This test sends messages to 3 replicas", + "05":"To trigger leader election: find the leader and terminate by controlled failure (kill -15)", + "06":"Restart the terminated broker", + "07":"Lookup brokers' log4j messages and verify that leader is re-elected successfully", + "08":"At the end it verifies the log size and contents", + "09":"Use a consumer to verify no message loss.", + "10":"Producer dimensions : mode:sync, acks:-1, comp:1", + "11":"Log segment size : 102400" + }, + "testcase_args": { + "broker_type": "leader", + "bounce_broker": "true", + "replica_factor": "3", + "num_partition": "3", + "num_iteration": "3", + "sleep_seconds_between_producer_calls": "1", + "message_producing_free_time_sec": "15" + }, + "entities": [ + { + "entity_id": "0", + "clientPort": "2188", + "dataDir": "/tmp/zookeeper_0", + "log_filename": "zookeeper_2188.log", + "config_filename": "zookeeper_2188.properties" + }, + { + "entity_id": "1", + "port": "9091", + "brokerid": "1", + "log.file.size": "102400", + "log.dir": "/tmp/kafka_server_1_logs", + "log.index.interval.bytes": "490", + "log_filename": "kafka_server_9091.log", + "config_filename": "kafka_server_9091.properties" + }, + { + "entity_id": "2", + "port": "9092", + "brokerid": "2", + "log.file.size": "102400", + "log.dir": "/tmp/kafka_server_2_logs", + "log.index.interval.bytes": "490", + "log_filename": "kafka_server_9092.log", + "config_filename": "kafka_server_9092.properties" + }, + { + "entity_id": "3", + "port": "9093", + "brokerid": "3", + "log.file.size": "102400", + "log.dir": "/tmp/kafka_server_3_logs", + "log.index.interval.bytes": "490", + "log_filename": "kafka_server_9093.log", + "config_filename": "kafka_server_9093.properties" + }, + { + "entity_id": "4", + "topic": "test_1", + "threads": "5", + "compression-codec": "1", + "message-size": "500", + "message": "100", + "request-num-acks": "-1", + "sync":"true", + "log_filename": "producer_performance_4.log", + "config_filename": "producer_performance_4.properties" + }, + { + "entity_id": "5", + "topic": "test_2", + "threads": "5", + "compression-codec": "1", + "message-size": "500", + "message": "100", + "request-num-acks": "-1", + "sync":"true", + "log_filename": "producer_performance_5.log", + "config_filename": "producer_performance_5.properties" + }, + { + "entity_id": "6", + "topic": "test_1", + "groupid": "mytestgroup", + "consumer-timeout-ms": "10000", + "log_filename": "console_consumer_6.log", + "config_filename": "console_consumer_6.properties" + }, + { + "entity_id": "7", + "topic": "test_2", + "groupid": "mytestgroup", + "consumer-timeout-ms": "10000", + "log_filename": "console_consumer_7.log", + "config_filename": "console_consumer_7.properties" + } + ] +} Index: system_test/replication_testsuite/testcase_0127/cluster_config.json =================================================================== --- system_test/replication_testsuite/testcase_0127/cluster_config.json (revision 0) +++ system_test/replication_testsuite/testcase_0127/cluster_config.json (revision 0) @@ -0,0 +1,76 @@ +{ + "cluster_config": [ + { + "entity_id": "0", + "hostname": "localhost", + "role": "zookeeper", + "cluster_name": "source", + "kafka_home": "default", + "java_home": "default", + "jmx_port": "9990" + }, + { + "entity_id": "1", + "hostname": "localhost", + "role": "broker", + "cluster_name": 
"source", + "kafka_home": "default", + "java_home": "default", + "jmx_port": "9991" + }, + { + "entity_id": "2", + "hostname": "localhost", + "role": "broker", + "cluster_name": "source", + "kafka_home": "default", + "java_home": "default", + "jmx_port": "9992" + }, + { + "entity_id": "3", + "hostname": "localhost", + "role": "broker", + "cluster_name": "source", + "kafka_home": "default", + "java_home": "default", + "jmx_port": "9993" + }, + { + "entity_id": "4", + "hostname": "localhost", + "role": "producer_performance", + "cluster_name": "source", + "kafka_home": "default", + "java_home": "default", + "jmx_port": "9997" + }, + { + "entity_id": "5", + "hostname": "localhost", + "role": "producer_performance", + "cluster_name": "source", + "kafka_home": "default", + "java_home": "default", + "jmx_port": "9998" + }, + { + "entity_id": "6", + "hostname": "localhost", + "role": "console_consumer", + "cluster_name": "source", + "kafka_home": "default", + "java_home": "default", + "jmx_port": "9999" + }, + { + "entity_id": "7", + "hostname": "localhost", + "role": "console_consumer", + "cluster_name": "source", + "kafka_home": "default", + "java_home": "default", + "jmx_port": "9099" + } + ] +} Index: system_test/replication_testsuite/testcase_0127/testcase_0127_properties.json =================================================================== --- system_test/replication_testsuite/testcase_0127/testcase_0127_properties.json (revision 0) +++ system_test/replication_testsuite/testcase_0127/testcase_0127_properties.json (revision 0) @@ -0,0 +1,102 @@ +{ + "description": {"01":"Leader Failure in Replication with multi topics & partitions : 1. ack => 1", + "02":"Setting log.index.interval.bytes to slightly smaller than message size to force indexing on each message", + "03":"Produce and consume messages to 2 topics - 3 partitions", + "04":"This test sends messages to 3 replicas", + "05":"To trigger leader election: find the leader and terminate by controlled failure (kill -15)", + "06":"Restart the terminated broker", + "07":"Lookup brokers' log4j messages and verify that leader is re-elected successfully", + "08":"At the end it verifies the log size and contents", + "09":"Use a consumer to verify no message loss.", + "10":"Producer dimensions : mode:sync, acks:1, comp:1", + "11":"Log segment size : 102400" + }, + "testcase_args": { + "broker_type": "leader", + "bounce_broker": "true", + "replica_factor": "3", + "num_partition": "3", + "num_iteration": "3", + "sleep_seconds_between_producer_calls": "1", + "message_producing_free_time_sec": "15" + }, + "entities": [ + { + "entity_id": "0", + "clientPort": "2188", + "dataDir": "/tmp/zookeeper_0", + "log_filename": "zookeeper_2188.log", + "config_filename": "zookeeper_2188.properties" + }, + { + "entity_id": "1", + "port": "9091", + "brokerid": "1", + "log.file.size": "102400", + "log.dir": "/tmp/kafka_server_1_logs", + "log.index.interval.bytes": "490", + "log_filename": "kafka_server_9091.log", + "config_filename": "kafka_server_9091.properties" + }, + { + "entity_id": "2", + "port": "9092", + "brokerid": "2", + "log.file.size": "102400", + "log.dir": "/tmp/kafka_server_2_logs", + "log.index.interval.bytes": "490", + "log_filename": "kafka_server_9092.log", + "config_filename": "kafka_server_9092.properties" + }, + { + "entity_id": "3", + "port": "9093", + "brokerid": "3", + "log.file.size": "102400", + "log.dir": "/tmp/kafka_server_3_logs", + "log.index.interval.bytes": "490", + "log_filename": "kafka_server_9093.log", + "config_filename": 
"kafka_server_9093.properties" + }, + { + "entity_id": "4", + "topic": "test_1", + "threads": "5", + "compression-codec": "1", + "message-size": "500", + "message": "100", + "request-num-acks": "1", + "sync":"true", + "log_filename": "producer_performance_4.log", + "config_filename": "producer_performance_4.properties" + }, + { + "entity_id": "5", + "topic": "test_2", + "threads": "5", + "compression-codec": "1", + "message-size": "500", + "message": "100", + "request-num-acks": "1", + "sync":"true", + "log_filename": "producer_performance_5.log", + "config_filename": "producer_performance_5.properties" + }, + { + "entity_id": "6", + "topic": "test_1", + "groupid": "mytestgroup", + "consumer-timeout-ms": "10000", + "log_filename": "console_consumer_6.log", + "config_filename": "console_consumer_6.properties" + }, + { + "entity_id": "7", + "topic": "test_2", + "groupid": "mytestgroup", + "consumer-timeout-ms": "10000", + "log_filename": "console_consumer_7.log", + "config_filename": "console_consumer_7.properties" + } + ] +} Index: system_test/replication_testsuite/testcase_0131/cluster_config.json =================================================================== --- system_test/replication_testsuite/testcase_0131/cluster_config.json (revision 0) +++ system_test/replication_testsuite/testcase_0131/cluster_config.json (revision 0) @@ -0,0 +1,76 @@ +{ + "cluster_config": [ + { + "entity_id": "0", + "hostname": "localhost", + "role": "zookeeper", + "cluster_name": "source", + "kafka_home": "default", + "java_home": "default", + "jmx_port": "9990" + }, + { + "entity_id": "1", + "hostname": "localhost", + "role": "broker", + "cluster_name": "source", + "kafka_home": "default", + "java_home": "default", + "jmx_port": "9991" + }, + { + "entity_id": "2", + "hostname": "localhost", + "role": "broker", + "cluster_name": "source", + "kafka_home": "default", + "java_home": "default", + "jmx_port": "9992" + }, + { + "entity_id": "3", + "hostname": "localhost", + "role": "broker", + "cluster_name": "source", + "kafka_home": "default", + "java_home": "default", + "jmx_port": "9993" + }, + { + "entity_id": "4", + "hostname": "localhost", + "role": "producer_performance", + "cluster_name": "source", + "kafka_home": "default", + "java_home": "default", + "jmx_port": "9997" + }, + { + "entity_id": "5", + "hostname": "localhost", + "role": "producer_performance", + "cluster_name": "source", + "kafka_home": "default", + "java_home": "default", + "jmx_port": "9998" + }, + { + "entity_id": "6", + "hostname": "localhost", + "role": "console_consumer", + "cluster_name": "source", + "kafka_home": "default", + "java_home": "default", + "jmx_port": "9999" + }, + { + "entity_id": "7", + "hostname": "localhost", + "role": "console_consumer", + "cluster_name": "source", + "kafka_home": "default", + "java_home": "default", + "jmx_port": "9099" + } + ] +} Index: system_test/replication_testsuite/testcase_0131/testcase_0131_properties.json =================================================================== --- system_test/replication_testsuite/testcase_0131/testcase_0131_properties.json (revision 0) +++ system_test/replication_testsuite/testcase_0131/testcase_0131_properties.json (revision 0) @@ -0,0 +1,101 @@ +{ + "description": {"01":"Leader Failure in Replication with multi topics & partitions : Base Test", + "02":"Produce and consume messages to 2 topics - 3 partitions", + "03":"This test sends messages to 2 replicas", + "04":"To trigger leader election: find the leader and terminate by controlled failure (kill -15)", 
+ "05":"Restart the terminated broker", + "06":"Lookup brokers' log4j messages and verify that leader is re-elected successfully", + "07":"At the end it verifies the log size and contents", + "08":"Use a consumer to verify no message loss.", + "09":"Producer dimensions : mode:sync, acks:-1, comp:0", + "10":"Log segment size : 102400" + }, + "testcase_args": { + "broker_type": "leader", + "bounce_broker": "true", + "replica_factor": "2", + "num_partition": "3", + "num_iteration": "3", + "sleep_seconds_between_producer_calls": "1", + "message_producing_free_time_sec": "15" + }, + "entities": [ + { + "entity_id": "0", + "clientPort": "2188", + "dataDir": "/tmp/zookeeper_0", + "log_filename": "zookeeper_2188.log", + "config_filename": "zookeeper_2188.properties" + }, + { + "entity_id": "1", + "port": "9091", + "brokerid": "1", + "log.file.size": "102400", + "log.dir": "/tmp/kafka_server_1_logs", + "log.index.interval.bytes": "10", + "log_filename": "kafka_server_9091.log", + "config_filename": "kafka_server_9091.properties" + }, + { + "entity_id": "2", + "port": "9092", + "brokerid": "2", + "log.file.size": "102400", + "log.dir": "/tmp/kafka_server_2_logs", + "log.index.interval.bytes": "10", + "log_filename": "kafka_server_9092.log", + "config_filename": "kafka_server_9092.properties" + }, + { + "entity_id": "3", + "port": "9093", + "brokerid": "3", + "log.file.size": "102400", + "log.dir": "/tmp/kafka_server_3_logs", + "log.index.interval.bytes": "10", + "log_filename": "kafka_server_9093.log", + "config_filename": "kafka_server_9093.properties" + }, + { + "entity_id": "4", + "topic": "test_1", + "threads": "5", + "compression-codec": "0", + "message-size": "500", + "message": "100", + "request-num-acks": "-1", + "sync":"true", + "log_filename": "producer_performance_4.log", + "config_filename": "producer_performance_4.properties" + }, + { + "entity_id": "5", + "topic": "test_2", + "threads": "5", + "compression-codec": "0", + "message-size": "500", + "message": "100", + "request-num-acks": "-1", + "sync":"true", + "log_filename": "producer_performance_5.log", + "config_filename": "producer_performance_5.properties" + }, + { + "entity_id": "6", + "topic": "test_1", + "groupid": "mytestgroup", + "consumer-timeout-ms": "10000", + "log_filename": "console_consumer_6.log", + "config_filename": "console_consumer_6.properties" + }, + { + "entity_id": "7", + "topic": "test_2", + "groupid": "mytestgroup", + "consumer-timeout-ms": "10000", + "log_filename": "console_consumer_7.log", + "config_filename": "console_consumer_7.properties" + } + ] +} Index: system_test/replication_testsuite/testcase_0132/cluster_config.json =================================================================== --- system_test/replication_testsuite/testcase_0132/cluster_config.json (revision 0) +++ system_test/replication_testsuite/testcase_0132/cluster_config.json (revision 0) @@ -0,0 +1,76 @@ +{ + "cluster_config": [ + { + "entity_id": "0", + "hostname": "localhost", + "role": "zookeeper", + "cluster_name": "source", + "kafka_home": "default", + "java_home": "default", + "jmx_port": "9990" + }, + { + "entity_id": "1", + "hostname": "localhost", + "role": "broker", + "cluster_name": "source", + "kafka_home": "default", + "java_home": "default", + "jmx_port": "9991" + }, + { + "entity_id": "2", + "hostname": "localhost", + "role": "broker", + "cluster_name": "source", + "kafka_home": "default", + "java_home": "default", + "jmx_port": "9992" + }, + { + "entity_id": "3", + "hostname": "localhost", + "role": "broker", + 
"cluster_name": "source", + "kafka_home": "default", + "java_home": "default", + "jmx_port": "9993" + }, + { + "entity_id": "4", + "hostname": "localhost", + "role": "producer_performance", + "cluster_name": "source", + "kafka_home": "default", + "java_home": "default", + "jmx_port": "9997" + }, + { + "entity_id": "5", + "hostname": "localhost", + "role": "producer_performance", + "cluster_name": "source", + "kafka_home": "default", + "java_home": "default", + "jmx_port": "9998" + }, + { + "entity_id": "6", + "hostname": "localhost", + "role": "console_consumer", + "cluster_name": "source", + "kafka_home": "default", + "java_home": "default", + "jmx_port": "9999" + }, + { + "entity_id": "7", + "hostname": "localhost", + "role": "console_consumer", + "cluster_name": "source", + "kafka_home": "default", + "java_home": "default", + "jmx_port": "9099" + } + ] +} Index: system_test/replication_testsuite/testcase_0132/testcase_0132_properties.json =================================================================== --- system_test/replication_testsuite/testcase_0132/testcase_0132_properties.json (revision 0) +++ system_test/replication_testsuite/testcase_0132/testcase_0132_properties.json (revision 0) @@ -0,0 +1,98 @@ +{ + "description": {"01":"Leader Failure in Replication with multi topics & partitions : 1. acks => 1", + "02":"Produce and consume messages to 2 topics - 3 partitions", + "03":"This test sends messages to 2 replicas", + "04":"To trigger leader election: find the leader and terminate by controlled failure (kill -15)", + "05":"Restart the terminated broker", + "06":"Lookup brokers' log4j messages and verify that leader is re-elected successfully", + "07":"At the end it verifies the log size and contents", + "08":"Use a consumer to verify no message loss.", + "09":"Producer dimensions : mode:sync, acks:1, comp:0", + "10":"Log segment size : 512000" + }, + "testcase_args": { + "broker_type": "leader", + "bounce_broker": "true", + "replica_factor": "2", + "num_partition": "3", + "num_iteration": "3", + "sleep_seconds_between_producer_calls": "1", + "message_producing_free_time_sec": "15" + }, + "entities": [ + { + "entity_id": "0", + "clientPort": "2188", + "dataDir": "/tmp/zookeeper_0", + "log_filename": "zookeeper_2188.log", + "config_filename": "zookeeper_2188.properties" + }, + { + "entity_id": "1", + "port": "9091", + "brokerid": "1", + "log.file.size": "512000", + "log.dir": "/tmp/kafka_server_1_logs", + "log_filename": "kafka_server_9091.log", + "config_filename": "kafka_server_9091.properties" + }, + { + "entity_id": "2", + "port": "9092", + "brokerid": "2", + "log.file.size": "512000", + "log.dir": "/tmp/kafka_server_2_logs", + "log_filename": "kafka_server_9092.log", + "config_filename": "kafka_server_9092.properties" + }, + { + "entity_id": "3", + "port": "9093", + "brokerid": "3", + "log.file.size": "512000", + "log.dir": "/tmp/kafka_server_3_logs", + "log_filename": "kafka_server_9093.log", + "config_filename": "kafka_server_9093.properties" + }, + { + "entity_id": "4", + "topic": "test_1", + "threads": "5", + "compression-codec": "0", + "message-size": "500", + "message": "100", + "request-num-acks": "1", + "sync":"true", + "log_filename": "producer_performance_4.log", + "config_filename": "producer_performance_4.properties" + }, + { + "entity_id": "5", + "topic": "test_2", + "threads": "5", + "compression-codec": "0", + "message-size": "500", + "message": "100", + "request-num-acks": "1", + "sync":"true", + "log_filename": "producer_performance_5.log", + 
"config_filename": "producer_performance_5.properties" + }, + { + "entity_id": "6", + "topic": "test_1", + "groupid": "mytestgroup", + "consumer-timeout-ms": "10000", + "log_filename": "console_consumer_6.log", + "config_filename": "console_consumer_6.properties" + }, + { + "entity_id": "7", + "topic": "test_2", + "groupid": "mytestgroup", + "consumer-timeout-ms": "10000", + "log_filename": "console_consumer_7.log", + "config_filename": "console_consumer_7.properties" + } + ] +} Index: system_test/replication_testsuite/testcase_0133/cluster_config.json =================================================================== --- system_test/replication_testsuite/testcase_0133/cluster_config.json (revision 0) +++ system_test/replication_testsuite/testcase_0133/cluster_config.json (revision 0) @@ -0,0 +1,76 @@ +{ + "cluster_config": [ + { + "entity_id": "0", + "hostname": "localhost", + "role": "zookeeper", + "cluster_name": "source", + "kafka_home": "default", + "java_home": "default", + "jmx_port": "9990" + }, + { + "entity_id": "1", + "hostname": "localhost", + "role": "broker", + "cluster_name": "source", + "kafka_home": "default", + "java_home": "default", + "jmx_port": "9991" + }, + { + "entity_id": "2", + "hostname": "localhost", + "role": "broker", + "cluster_name": "source", + "kafka_home": "default", + "java_home": "default", + "jmx_port": "9992" + }, + { + "entity_id": "3", + "hostname": "localhost", + "role": "broker", + "cluster_name": "source", + "kafka_home": "default", + "java_home": "default", + "jmx_port": "9993" + }, + { + "entity_id": "4", + "hostname": "localhost", + "role": "producer_performance", + "cluster_name": "source", + "kafka_home": "default", + "java_home": "default", + "jmx_port": "9997" + }, + { + "entity_id": "5", + "hostname": "localhost", + "role": "producer_performance", + "cluster_name": "source", + "kafka_home": "default", + "java_home": "default", + "jmx_port": "9998" + }, + { + "entity_id": "6", + "hostname": "localhost", + "role": "console_consumer", + "cluster_name": "source", + "kafka_home": "default", + "java_home": "default", + "jmx_port": "9999" + }, + { + "entity_id": "7", + "hostname": "localhost", + "role": "console_consumer", + "cluster_name": "source", + "kafka_home": "default", + "java_home": "default", + "jmx_port": "9099" + } + ] +} Index: system_test/replication_testsuite/testcase_0133/testcase_0133_properties.json =================================================================== --- system_test/replication_testsuite/testcase_0133/testcase_0133_properties.json (revision 0) +++ system_test/replication_testsuite/testcase_0133/testcase_0133_properties.json (revision 0) @@ -0,0 +1,98 @@ +{ + "description": {"01":"Leader Failure in Replication with multi topics & partitions : 1. mode => async; 2. 
comp => 0", + "02":"Produce and consume messages to 2 topics - 3 partitions", + "03":"This test sends messages to 2 replicas", + "04":"To trigger leader election: find the leader and terminate by controlled failure (kill -15)", + "05":"Restart the terminated broker", + "06":"Lookup brokers' log4j messages and verify that leader is re-elected successfully", + "07":"At the end it verifies the log size and contents", + "08":"Use a consumer to verify no message loss.", + "09":"Producer dimensions : mode:async, acks:1, comp:1", + "10":"Log segment size : 512000" + }, + "testcase_args": { + "broker_type": "leader", + "bounce_broker": "true", + "replica_factor": "2", + "num_partition": "3", + "num_iteration": "3", + "sleep_seconds_between_producer_calls": "1", + "message_producing_free_time_sec": "15" + }, + "entities": [ + { + "entity_id": "0", + "clientPort": "2188", + "dataDir": "/tmp/zookeeper_0", + "log_filename": "zookeeper_2188.log", + "config_filename": "zookeeper_2188.properties" + }, + { + "entity_id": "1", + "port": "9091", + "brokerid": "1", + "log.file.size": "512000", + "log.dir": "/tmp/kafka_server_1_logs", + "log_filename": "kafka_server_9091.log", + "config_filename": "kafka_server_9091.properties" + }, + { + "entity_id": "2", + "port": "9092", + "brokerid": "2", + "log.file.size": "512000", + "log.dir": "/tmp/kafka_server_2_logs", + "log_filename": "kafka_server_9092.log", + "config_filename": "kafka_server_9092.properties" + }, + { + "entity_id": "3", + "port": "9093", + "brokerid": "3", + "log.file.size": "512000", + "log.dir": "/tmp/kafka_server_3_logs", + "log_filename": "kafka_server_9093.log", + "config_filename": "kafka_server_9093.properties" + }, + { + "entity_id": "4", + "topic": "test_1", + "threads": "5", + "compression-codec": "1", + "message-size": "500", + "message": "100", + "request-num-acks": "1", + "sync":"false", + "log_filename": "producer_performance_4.log", + "config_filename": "producer_performance_4.properties" + }, + { + "entity_id": "5", + "topic": "test_2", + "threads": "5", + "compression-codec": "1", + "message-size": "500", + "message": "100", + "request-num-acks": "1", + "sync":"false", + "log_filename": "producer_performance_5.log", + "config_filename": "producer_performance_5.properties" + }, + { + "entity_id": "6", + "topic": "test_1", + "groupid": "mytestgroup", + "consumer-timeout-ms": "10000", + "log_filename": "console_consumer_6.log", + "config_filename": "console_consumer_6.properties" + }, + { + "entity_id": "7", + "topic": "test_2", + "groupid": "mytestgroup", + "consumer-timeout-ms": "10000", + "log_filename": "console_consumer_7.log", + "config_filename": "console_consumer_7.properties" + } + ] +} Index: system_test/replication_testsuite/testcase_0153/testcase_0153_properties.json =================================================================== --- system_test/replication_testsuite/testcase_0153/testcase_0153_properties.json (revision 1406271) +++ system_test/replication_testsuite/testcase_0153/testcase_0153_properties.json (working copy) @@ -65,6 +65,7 @@ "message": "100", "request-num-acks": "1", "sync":"true", + "producer-retry-backoff-ms": "2500", "log_filename": "producer_performance.log", "config_filename": "producer_performance.properties" }, Index: system_test/replication_testsuite/testcase_0154/testcase_0154_properties.json =================================================================== --- system_test/replication_testsuite/testcase_0154/testcase_0154_properties.json (revision 1406271) +++ 
system_test/replication_testsuite/testcase_0154/testcase_0154_properties.json (working copy) @@ -65,6 +65,7 @@ "message": "100", "request-num-acks": "1", "sync":"false", + "producer-retry-backoff-ms": "2500", "log_filename": "producer_performance.log", "config_filename": "producer_performance.properties" }, Index: system_test/replication_testsuite/testcase_0157/testcase_0157_properties.json =================================================================== --- system_test/replication_testsuite/testcase_0157/testcase_0157_properties.json (revision 1406271) +++ system_test/replication_testsuite/testcase_0157/testcase_0157_properties.json (working copy) @@ -65,6 +65,7 @@ "message": "100", "request-num-acks": "1", "sync":"true", + "producer-retry-backoff-ms": "2500", "log_filename": "producer_performance.log", "config_filename": "producer_performance.properties" }, Index: system_test/replication_testsuite/testcase_0158/testcase_0158_properties.json =================================================================== --- system_test/replication_testsuite/testcase_0158/testcase_0158_properties.json (revision 1406271) +++ system_test/replication_testsuite/testcase_0158/testcase_0158_properties.json (working copy) @@ -65,6 +65,7 @@ "message": "100", "request-num-acks": "1", "sync":"false", + "producer-retry-backoff-ms": "2500", "log_filename": "producer_performance.log", "config_filename": "producer_performance.properties" }, Index: system_test/testcase_to_run_all.json =================================================================== --- system_test/testcase_to_run_all.json (revision 1406271) +++ system_test/testcase_to_run_all.json (working copy) @@ -38,7 +38,15 @@ "testcase_0121", "testcase_0122", "testcase_0123", + "testcase_0124", + "testcase_0125", + "testcase_0126", + "testcase_0127", + "testcase_0131", + "testcase_0132", + "testcase_0133", + "testcase_0151", "testcase_0152", "testcase_0153", @@ -76,12 +84,40 @@ "testcase_0308" ], + "LogRetentionTest" : [ + "testcase_4001", + "testcase_4002", + "testcase_4003", + "testcase_4004", + "testcase_4005", + "testcase_4006", + "testcase_4007", + "testcase_4008", + + "testcase_4011", + "testcase_4012", + "testcase_4013", + "testcase_4014", + "testcase_4015", + "testcase_4016", + "testcase_4017", + "testcase_4018" + ], + "MigrationToolTest" : [ - "testcase_9001" + "testcase_9001", + "testcase_9003", + "testcase_9004", + "testcase_9005", + "testcase_9006" ], "MirrorMakerTest" : [ "testcase_5001", - "testcase_5002" + "testcase_5002", + "testcase_5003", + "testcase_5004", + "testcase_5005", + "testcase_5006" ] } Index: system_test/utils/kafka_system_test_utils.py =================================================================== --- system_test/utils/kafka_system_test_utils.py (revision 1406271) +++ system_test/utils/kafka_system_test_utils.py (working copy) @@ -174,6 +174,20 @@ logger.debug("executing command [" + cmdStr + "]", extra=d) system_test_utils.sys_call(cmdStr) + # ============================== + # collect ZK log + # ============================== + if role == "zookeeper": + dataLogPathName = system_test_utils.get_data_by_lookup_keyval( + testcaseEnv.testcaseConfigsList, "entity_id", entity_id, "dataDir") + + cmdList = ["scp -r", + hostname + ":" + dataLogPathName, + logPathName] + cmdStr = " ".join(cmdList) + logger.debug("executing command [" + cmdStr + "]", extra=d) + system_test_utils.sys_call(cmdStr) + # ============================== # collect dashboards file # ============================== @@ -485,14 +499,17 @@ 
start_entity_in_background(systemTestEnv, testcaseEnv, brokerEntityId) -def start_mirror_makers(systemTestEnv, testcaseEnv): - clusterEntityConfigDictList = systemTestEnv.clusterEntityConfigDictList +def start_mirror_makers(systemTestEnv, testcaseEnv, onlyThisEntityId=None): - brokerEntityIdList = system_test_utils.get_data_from_list_of_dicts( - clusterEntityConfigDictList, "role", "mirror_maker", "entity_id") + if onlyThisEntityId is not None: + start_entity_in_background(systemTestEnv, testcaseEnv, onlyThisEntityId) + else: + clusterEntityConfigDictList = systemTestEnv.clusterEntityConfigDictList + brokerEntityIdList = system_test_utils.get_data_from_list_of_dicts( + clusterEntityConfigDictList, "role", "mirror_maker", "entity_id") - for brokerEntityId in brokerEntityIdList: - start_entity_in_background(systemTestEnv, testcaseEnv, brokerEntityId) + for brokerEntityId in brokerEntityIdList: + start_entity_in_background(systemTestEnv, testcaseEnv, brokerEntityId) def get_broker_shutdown_log_line(systemTestEnv, testcaseEnv, leaderAttributesDict): @@ -1040,9 +1057,10 @@ if not "checksum:" in line: continue else: - matchObj = re.match('.*checksum:(\d*?).*', line) + matchObj = re.match('.*checksum:(\d*).*', line) if matchObj is not None: - messageChecksumList.append( matchObj.group(1) ) + checksum = matchObj.group(1) + messageChecksumList.append( checksum ) else: logger.error("unexpected log line : " + line, extra=d) @@ -1311,53 +1329,71 @@ stop_remote_entity(systemTestEnv, entityId, zkParentPid) -def start_migration_tool(systemTestEnv, testcaseEnv): +def start_migration_tool(systemTestEnv, testcaseEnv, onlyThisEntityId=None): clusterConfigList = systemTestEnv.clusterEntityConfigDictList migrationToolConfigList = system_test_utils.get_dict_from_list_of_dicts(clusterConfigList, "role", "migration_tool") - migrationToolConfig = migrationToolConfigList[0] - host = migrationToolConfig["hostname"] - entityId = migrationToolConfig["entity_id"] - jmxPort = migrationToolConfig["jmx_port"] - role = migrationToolConfig["role"] - kafkaHome = system_test_utils.get_data_by_lookup_keyval(clusterConfigList, "entity_id", entityId, "kafka_home") - javaHome = system_test_utils.get_data_by_lookup_keyval(clusterConfigList, "entity_id", entityId, "java_home") - jmxPort = system_test_utils.get_data_by_lookup_keyval(clusterConfigList, "entity_id", entityId, "jmx_port") - kafkaRunClassBin = kafkaHome + "/bin/kafka-run-class.sh" + for migrationToolConfig in migrationToolConfigList: - logger.info("starting kafka migration tool", extra=d) - migrationToolLogPath = get_testcase_config_log_dir_pathname(testcaseEnv, "migration_tool", entityId, "default") - migrationToolLogPathName = migrationToolLogPath + "/migration_tool.log" - testcaseEnv.userDefinedEnvVarDict["migrationToolLogPathName"] = migrationToolLogPathName + entityId = migrationToolConfig["entity_id"] - testcaseConfigsList = testcaseEnv.testcaseConfigsList - numProducers = system_test_utils.get_data_by_lookup_keyval(testcaseConfigsList, "entity_id", entityId, "num.producers") - numStreams = system_test_utils.get_data_by_lookup_keyval(testcaseConfigsList, "entity_id", entityId, "num.streams") - producerConfig = system_test_utils.get_data_by_lookup_keyval(testcaseConfigsList, "entity_id", entityId, "producer.config") - consumerConfig = system_test_utils.get_data_by_lookup_keyval(testcaseConfigsList, "entity_id", entityId, "consumer.config") - zkClientJar = system_test_utils.get_data_by_lookup_keyval(testcaseConfigsList, "entity_id", entityId, "zkclient.01.jar") - 
kafka07Jar = system_test_utils.get_data_by_lookup_keyval(testcaseConfigsList, "entity_id", entityId, "kafka.07.jar") - whiteList = system_test_utils.get_data_by_lookup_keyval(testcaseConfigsList, "entity_id", entityId, "whitelist") - logFile = system_test_utils.get_data_by_lookup_keyval(testcaseConfigsList, "entity_id", entityId, "log_filename") + if onlyThisEntityId is None or entityId == onlyThisEntityId: - cmdList = ["ssh " + host, - "'JAVA_HOME=" + javaHome, - "JMX_PORT=" + jmxPort, - kafkaRunClassBin + " kafka.tools.KafkaMigrationTool", - "--whitelist=" + whiteList, - "--num.producers=" + numProducers, - "--num.streams=" + numStreams, - "--producer.config=" + systemTestEnv.SYSTEM_TEST_BASE_DIR + "/" + producerConfig, - "--consumer.config=" + systemTestEnv.SYSTEM_TEST_BASE_DIR + "/" + consumerConfig, - "--zkclient.01.jar=" + systemTestEnv.SYSTEM_TEST_BASE_DIR + "/" + zkClientJar, - "--kafka.07.jar=" + systemTestEnv.SYSTEM_TEST_BASE_DIR + "/" + kafka07Jar, - " &> " + migrationToolLogPath + "/migrationTool.log", - " & echo pid:$! > " + migrationToolLogPath + "/entity_" + entityId + "_pid'"] + host = migrationToolConfig["hostname"] + jmxPort = migrationToolConfig["jmx_port"] + role = migrationToolConfig["role"] + kafkaHome = system_test_utils.get_data_by_lookup_keyval(clusterConfigList, "entity_id", entityId, "kafka_home") + javaHome = system_test_utils.get_data_by_lookup_keyval(clusterConfigList, "entity_id", entityId, "java_home") + jmxPort = system_test_utils.get_data_by_lookup_keyval(clusterConfigList, "entity_id", entityId, "jmx_port") + kafkaRunClassBin = kafkaHome + "/bin/kafka-run-class.sh" - cmdStr = " ".join(cmdList) - logger.debug("executing command: [" + cmdStr + "]", extra=d) - system_test_utils.async_sys_call(cmdStr) + logger.info("starting kafka migration tool", extra=d) + migrationToolLogPath = get_testcase_config_log_dir_pathname(testcaseEnv, "migration_tool", entityId, "default") + migrationToolLogPathName = migrationToolLogPath + "/migration_tool.log" + testcaseEnv.userDefinedEnvVarDict["migrationToolLogPathName"] = migrationToolLogPathName + testcaseConfigsList = testcaseEnv.testcaseConfigsList + numProducers = system_test_utils.get_data_by_lookup_keyval(testcaseConfigsList, "entity_id", entityId, "num.producers") + numStreams = system_test_utils.get_data_by_lookup_keyval(testcaseConfigsList, "entity_id", entityId, "num.streams") + producerConfig = system_test_utils.get_data_by_lookup_keyval(testcaseConfigsList, "entity_id", entityId, "producer.config") + consumerConfig = system_test_utils.get_data_by_lookup_keyval(testcaseConfigsList, "entity_id", entityId, "consumer.config") + zkClientJar = system_test_utils.get_data_by_lookup_keyval(testcaseConfigsList, "entity_id", entityId, "zkclient.01.jar") + kafka07Jar = system_test_utils.get_data_by_lookup_keyval(testcaseConfigsList, "entity_id", entityId, "kafka.07.jar") + whiteList = system_test_utils.get_data_by_lookup_keyval(testcaseConfigsList, "entity_id", entityId, "whitelist") + logFile = system_test_utils.get_data_by_lookup_keyval(testcaseConfigsList, "entity_id", entityId, "log_filename") + + cmdList = ["ssh " + host, + "'JAVA_HOME=" + javaHome, + "JMX_PORT=" + jmxPort, + kafkaRunClassBin + " kafka.tools.KafkaMigrationTool", + "--whitelist=" + whiteList, + "--num.producers=" + numProducers, + "--num.streams=" + numStreams, + "--producer.config=" + systemTestEnv.SYSTEM_TEST_BASE_DIR + "/" + producerConfig, + "--consumer.config=" + systemTestEnv.SYSTEM_TEST_BASE_DIR + "/" + consumerConfig, + "--zkclient.01.jar=" + 
systemTestEnv.SYSTEM_TEST_BASE_DIR + "/" + zkClientJar, + "--kafka.07.jar=" + systemTestEnv.SYSTEM_TEST_BASE_DIR + "/" + kafka07Jar, + " &> " + migrationToolLogPath + "/migrationTool.log", + " & echo pid:$! > " + migrationToolLogPath + "/entity_" + entityId + "_pid'"] + + cmdStr = " ".join(cmdList) + logger.debug("executing command: [" + cmdStr + "]", extra=d) + system_test_utils.async_sys_call(cmdStr) + time.sleep(5) + + pidCmdStr = "ssh " + host + " 'cat " + migrationToolLogPath + "/entity_" + entityId + "_pid' 2> /dev/null" + logger.debug("executing command: [" + pidCmdStr + "]", extra=d) + subproc = system_test_utils.sys_call_return_subproc(pidCmdStr) + + # keep track of the remote entity pid in a dictionary + for line in subproc.stdout.readlines(): + if line.startswith("pid"): + line = line.rstrip('\n') + logger.debug("found pid line: [" + line + "]", extra=d) + tokens = line.split(':') + testcaseEnv.entityMigrationToolParentPidDict[entityId] = tokens[1] + + def validate_07_08_migrated_data_matched(systemTestEnv, testcaseEnv): validationStatusDict = testcaseEnv.validationStatusDict clusterEntityConfigDictList = systemTestEnv.clusterEntityConfigDictList @@ -1392,30 +1428,40 @@ consumerLogPath = get_testcase_config_log_dir_pathname(testcaseEnv, "console_consumer", matchingConsumerEntityId, "default") consumerLogPathName = consumerLogPath + "/console_consumer.log" - producerMsgChecksumList = get_message_checksum(producerLogPathName) - consumerMsgChecksumList = get_message_checksum(consumerLogPathName) - producerMsgChecksumSet = set(producerMsgChecksumList) - consumerMsgChecksumSet = set(consumerMsgChecksumList) + producerMsgChecksumList = get_message_checksum(producerLogPathName) + consumerMsgChecksumList = get_message_checksum(consumerLogPathName) + producerMsgChecksumSet = set(producerMsgChecksumList) + consumerMsgChecksumSet = set(consumerMsgChecksumList) + producerMsgChecksumUniqList = list(producerMsgChecksumSet) + consumerMsgChecksumUniqList = list(consumerMsgChecksumSet) missingMsgChecksumInConsumer = producerMsgChecksumSet - consumerMsgChecksumSet + logger.debug("size of producerMsgChecksumList : " + str(len(producerMsgChecksumList)), extra=d) + logger.debug("size of consumerMsgChecksumList : " + str(len(consumerMsgChecksumList)), extra=d) + logger.debug("size of producerMsgChecksumSet : " + str(len(producerMsgChecksumSet)), extra=d) + logger.debug("size of consumerMsgChecksumSet : " + str(len(consumerMsgChecksumSet)), extra=d) + logger.debug("size of producerMsgChecksumUniqList : " + str(len(producerMsgChecksumUniqList)), extra=d) + logger.debug("size of consumerMsgChecksumUniqList : " + str(len(consumerMsgChecksumUniqList)), extra=d) + logger.debug("size of missingMsgChecksumInConsumer : " + str(len(missingMsgChecksumInConsumer)), extra=d) + outfile = open(msgChecksumMissingInConsumerLogPathName, "w") for id in missingMsgChecksumInConsumer: outfile.write(id + "\n") outfile.close() - logger.info("no. of unique messages on topic [" + topic + "] sent from publisher : " + str(len(producerMsgChecksumList)), extra=d) - logger.info("no. of unique messages on topic [" + topic + "] received by consumer : " + str(len(consumerMsgChecksumList)), extra=d) - validationStatusDict["Unique messages from producer on [" + topic + "]"] = str(len(producerMsgChecksumList)) - validationStatusDict["Unique messages from consumer on [" + topic + "]"] = str(len(consumerMsgChecksumList)) + logger.info("no. 
of messages on topic [" + topic + "] sent from producer : " + str(len(producerMsgChecksumList)), extra=d) + logger.info("no. of messages on topic [" + topic + "] received by consumer : " + str(len(consumerMsgChecksumList)), extra=d) + logger.info("no. of unique messages on topic [" + topic + "] sent from producer : " + str(len(producerMsgChecksumUniqList)), extra=d) + logger.info("no. of unique messages on topic [" + topic + "] received by consumer : " + str(len(consumerMsgChecksumUniqList)), extra=d) + validationStatusDict["Unique messages from producer on [" + topic + "]"] = str(len(list(producerMsgChecksumSet))) + validationStatusDict["Unique messages from consumer on [" + topic + "]"] = str(len(list(consumerMsgChecksumSet))) - if ( len(missingMsgChecksumInConsumer) == 0 and len(producerMsgChecksumList) > 0 ): + if ( len(producerMsgChecksumList) > 0 and len(list(producerMsgChecksumSet)) == len(list(consumerMsgChecksumSet))): validationStatusDict["Validate for data matched on topic [" + topic + "]"] = "PASSED" - #return True else: validationStatusDict["Validate for data matched on topic [" + topic + "]"] = "FAILED" logger.info("See " + msgChecksumMissingInConsumerLogPathName + " for missing MessageID", extra=d) - #return False def validate_broker_log_segment_checksum(systemTestEnv, testcaseEnv, clusterName="source"): @@ -1542,7 +1588,7 @@ else: validationStatusDict["Validate for merged log segment checksum in cluster [" + clusterName + "]"] = "FAILED" -def start_simple_consumer(systemTestEnv, testcaseEnv): +def start_simple_consumer(systemTestEnv, testcaseEnv, minStartingOffsetDict=None): clusterList = systemTestEnv.clusterEntityConfigDictList consumerConfigList = system_test_utils.get_dict_from_list_of_dicts(clusterList, "role", "console_consumer") @@ -1586,20 +1632,27 @@ numPartitions = int(numPartitions) replicaIndex = 1 + startingOffset = -2 brokerPortList = brokerListStr.split(',') for brokerPort in brokerPortList: - k = 0 - while (k < numPartitions): - logger.info("starting debug consumer for replica on [" + brokerPort + "] partition [" + str(k) + "]", extra=d) + partitionId = 0 + while (partitionId < numPartitions): + logger.info("starting debug consumer for replica on [" + brokerPort + "] partition [" + str(partitionId) + "]", extra=d) + + if minStartingOffsetDict is not None: + topicPartition = topic + "-" + str(partitionId) + startingOffset = minStartingOffsetDict[topicPartition] + brokerPortLabel = brokerPort.replace(":", "_") cmdList = ["ssh " + host, "'JAVA_HOME=" + javaHome, kafkaRunClassBin + " kafka.tools.SimpleConsumerShell", "--broker-list " + brokerListStr, "--topic " + topic, - "--partition " + str(k), + "--partition " + str(partitionId), "--replica " + str(replicaIndex), + "--offset " + str(startingOffset), "--no-wait-at-logend ", " >> " + consumerLogPath + "/simple_consumer_" + str(replicaIndex) + ".log", " & echo pid:$! 
> " + consumerLogPath + "/entity_" + entityId + "_pid'"] @@ -1607,24 +1660,13 @@ cmdStr = " ".join(cmdList) logger.debug("executing command: [" + cmdStr + "]", extra=d) - system_test_utils.async_sys_call(cmdStr) - time.sleep(2) - - pidCmdStr = "ssh " + host + " 'cat " + consumerLogPath + "/entity_" + entityId + "_pid'" - logger.debug("executing command: [" + pidCmdStr + "]", extra=d) - subproc = system_test_utils.sys_call_return_subproc(pidCmdStr) - - # keep track of the remote entity pid in a dictionary - for line in subproc.stdout.readlines(): - if line.startswith("pid"): - line = line.rstrip('\n') - logger.debug("found pid line: [" + line + "]", extra=d) - tokens = line.split(':') - testcaseEnv.consumerHostParentPidDict[host] = tokens[1] - - logger.info("sleeping for 5 sec",extra=d) - time.sleep(5) - k += 1 + subproc_1 = system_test_utils.sys_call_return_subproc(cmdStr) + # dummy for-loop to wait until the process is completed + for line in subproc_1.stdout.readlines(): + pass + time.sleep(1) + + partitionId += 1 replicaIndex += 1 def validate_simple_consumer_data_matched(systemTestEnv, testcaseEnv): @@ -1733,4 +1775,117 @@ tcConfigsList, "brokerid", brokerid, "entity_id") return controllerDict +def getMinCommonStartingOffset(systemTestEnv, testcaseEnv, clusterName="source"): + brokerLogStartOffsetDict = {} + minCommonStartOffsetDict = {} + + tcConfigsList = testcaseEnv.testcaseConfigsList + clusterConfigList = systemTestEnv.clusterEntityConfigDictList + allBrokerConfigList = system_test_utils.get_dict_from_list_of_dicts(clusterConfigList, "role", "broker") + brokerEntityIdList = system_test_utils.get_data_from_list_of_dicts(allBrokerConfigList, "cluster_name", clusterName, "entity_id") + + # loop through all brokers + for brokerEntityId in sorted(brokerEntityIdList): + # remoteLogSegmentPathName : /tmp/kafka_server_4_logs + # => remoteLogSegmentDir : kafka_server_4_logs + remoteLogSegmentPathName = system_test_utils.get_data_by_lookup_keyval(tcConfigsList, "entity_id", brokerEntityId, "log.dir") + remoteLogSegmentDir = os.path.basename(remoteLogSegmentPathName) + logPathName = get_testcase_config_log_dir_pathname(testcaseEnv, "broker", brokerEntityId, "default") + localLogSegmentPath = logPathName + "/" + remoteLogSegmentDir + + # loop through all topicPartition directories such as : test_1-0, test_1-1, ... + for topicPartition in sorted(os.listdir(localLogSegmentPath)): + # found a topic-partition directory + if os.path.isdir(localLogSegmentPath + "/" + topicPartition): + + # startingOffsetKey : : (eg. 
+def getMinCommonStartingOffset(systemTestEnv, testcaseEnv, clusterName="source"): + brokerLogStartOffsetDict = {} + minCommonStartOffsetDict = {} + + tcConfigsList = testcaseEnv.testcaseConfigsList + clusterConfigList = systemTestEnv.clusterEntityConfigDictList + allBrokerConfigList = system_test_utils.get_dict_from_list_of_dicts(clusterConfigList, "role", "broker") + brokerEntityIdList = system_test_utils.get_data_from_list_of_dicts(allBrokerConfigList, "cluster_name", clusterName, "entity_id") + + # loop through all brokers + for brokerEntityId in sorted(brokerEntityIdList): + # remoteLogSegmentPathName : /tmp/kafka_server_4_logs + # => remoteLogSegmentDir : kafka_server_4_logs + remoteLogSegmentPathName = system_test_utils.get_data_by_lookup_keyval(tcConfigsList, "entity_id", brokerEntityId, "log.dir") + remoteLogSegmentDir = os.path.basename(remoteLogSegmentPathName) + logPathName = get_testcase_config_log_dir_pathname(testcaseEnv, "broker", brokerEntityId, "default") + localLogSegmentPath = logPathName + "/" + remoteLogSegmentDir + + # loop through all topicPartition directories such as : test_1-0, test_1-1, ... + for topicPartition in sorted(os.listdir(localLogSegmentPath)): + # found a topic-partition directory + if os.path.isdir(localLogSegmentPath + "/" + topicPartition): + + # startingOffsetKey : brokerEntityId:topicPartition (eg. 1:test_1-0) + startingOffsetKey = brokerEntityId + ":" + topicPartition + + # log segment files are located in : localLogSegmentPath + "/" + topicPartition + # sort the log segment files under each topic-partition + for logFile in sorted(os.listdir(localLogSegmentPath + "/" + topicPartition)): + + # logFile is located at: + # system_test/xxxx_testsuite/testcase_xxxx/logs/broker-1/kafka_server_1_logs/test_1-0/00000000000000003800.log + if logFile.endswith(".log"): + matchObj = re.match("0*(.*)\.log", logFile) # remove the leading zeros & the file extension (eg. "00000000000000003800.log" -> "3800") + startingOffset = matchObj.group(1) # this is the starting offset from the file name + if len(startingOffset) == 0: # when log filename is: 00000000000000000000.log + startingOffset = "0" + + # starting offset of a topic-partition can be retrieved from the filename of the first log segment + # => break out of this innermost for-loop after processing the first log segment file + brokerLogStartOffsetDict[startingOffsetKey] = startingOffset + break + + # brokerLogStartOffsetDict is like this: + # {u'1:test_1-0': u'400', + # u'1:test_1-1': u'400', + # u'1:test_2-0': u'200', + # u'1:test_2-1': u'200', + # u'2:test_1-0': u'400', + # u'2:test_1-1': u'400', + # u'2:test_2-0': u'200', + # u'2:test_2-1': u'200', + # u'3:test_1-0': '0', + # u'3:test_1-1': '0', + # u'3:test_2-0': '0', + # u'3:test_2-1': '0'} + + # loop through brokerLogStartOffsetDict to get the min common starting offset for each topic-partition + for brokerTopicPartition in sorted(brokerLogStartOffsetDict.iterkeys()): + topicPartition = brokerTopicPartition.split(':')[1] + + if topicPartition in minCommonStartOffsetDict: + # key exists => if the new value is numerically greater, replace the existing value with new + if int(minCommonStartOffsetDict[topicPartition]) < int(brokerLogStartOffsetDict[brokerTopicPartition]): + minCommonStartOffsetDict[topicPartition] = brokerLogStartOffsetDict[brokerTopicPartition] + else: + # key doesn't exist => add it to the dictionary + minCommonStartOffsetDict[topicPartition] = brokerLogStartOffsetDict[brokerTopicPartition] + + # returning minCommonStartOffsetDict which is like this: + # {u'test_1-0': u'400', + # u'test_1-1': u'400', + # u'test_2-0': u'200', + # u'test_2-1': u'200'} + return minCommonStartOffsetDict +
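+# Usage sketch (assumed wiring, for illustration): the dictionary returned above
+# is suitable as the minStartingOffsetDict argument of start_simple_consumer(),
+# so that each SimpleConsumerShell starts at the first offset still present on
+# every replica of its partition, eg.:
+#
+#     minStartingOffsetDict = getMinCommonStartingOffset(systemTestEnv, testcaseEnv)
+#     start_simple_consumer(systemTestEnv, testcaseEnv, minStartingOffsetDict)
+#
+# With the sample values above, the debug consumer for partition test_1-0 would
+# be launched with "--offset 400".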
+def validate_simple_consumer_data_matched_across_replicas(systemTestEnv, testcaseEnv): + validationStatusDict = testcaseEnv.validationStatusDict + clusterEntityConfigDictList = systemTestEnv.clusterEntityConfigDictList + consumerEntityIdList = system_test_utils.get_data_from_list_of_dicts( + clusterEntityConfigDictList, "role", "console_consumer", "entity_id") + + mismatchCount = 0 + for consumerEntityId in consumerEntityIdList: + + topic = system_test_utils.get_data_by_lookup_keyval(testcaseEnv.testcaseConfigsList, "entity_id", consumerEntityId, "topic") + consumerLogPath = get_testcase_config_log_dir_pathname(testcaseEnv, "console_consumer", consumerEntityId, "default") + + firstReplicaMsgIdSet = None + + for logFile in sorted(os.listdir(consumerLogPath)): + # each simple_consumer_n.log contains all data in 1 replica for this topic + if logFile.startswith("simple_consumer_") and logFile.endswith(".log"): + consumerLogPathName = consumerLogPath + "/" + logFile + consumerMsgIdList = get_message_id(consumerLogPathName) + consumerMsgIdSet = set(consumerMsgIdList) + + if firstReplicaMsgIdSet is None: + # save the first replica's message id set as the reference + firstReplicaMsgIdSet = consumerMsgIdSet + elif firstReplicaMsgIdSet != consumerMsgIdSet: + # a subsequent replica's message id set does not match the reference => test fails + mismatchCount += 1 + + logger.info("no. of unique messages on topic [" + topic + "] at " + logFile + " : " + str(len(consumerMsgIdSet)), extra=d) + validationStatusDict["Unique messages from consumer on [" + topic + "] at " + logFile] = str(len(consumerMsgIdSet)) + + if mismatchCount == 0: + validationStatusDict["Validate for data matched on topic [" + topic + "] across replicas"] = "PASSED" + else: + validationStatusDict["Validate for data matched on topic [" + topic + "] across replicas"] = "FAILED" + + Index: system_test/utils/testcase_env.py =================================================================== --- system_test/utils/testcase_env.py (revision 1406271) +++ system_test/utils/testcase_env.py (working copy) @@ -51,6 +51,12 @@ # { 0: 12345, 1: 12389, ... } entityMirrorMakerParentPidDict = {} + # dictionary of entity_id to ppid for migration tool entities + # key: entity_id + # val: ppid of the migration tool process associated to that entity_id + # { 0: 12345, 1: 12389, ... } + entityMigrationToolParentPidDict = {} + # dictionary of entity_id to list of JMX ppid # key: entity_id # val: list of JMX ppid associated to that entity_id