-Xmx1G -Xms1G -server -XX:+UseG1GC -XX:MaxGCPauseMillis=20 -XX:InitiatingHeapOccupancyPercent=35 -XX:+ExplicitGCInvokesConcurrent -Djava.awt.headless=true -Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false -Dkafka.logs.dir=""/logs"" "-Dlog4j.configuration=file:C:\kafka_2.11-1.1.0\bin\windows\../../config/log4j.properties" -cp "C:\kafka_2.11-1.1.0\libs\aopalliance-repackaged-2.5.0-b32.jar";"C:\kafka_2.11-1.1.0\libs\argparse4j-0.7.0.jar";"C:\kafka_2.11-1.1.0\libs\commons-lang3-3.5.jar";"C:\kafka_2.11-1.1.0\libs\connect-api-1.1.0.jar";"C:\kafka_2.11-1.1.0\libs\connect-file-1.1.0.jar";"C:\kafka_2.11-1.1.0\libs\connect-json-1.1.0.jar";"C:\kafka_2.11-1.1.0\libs\connect-runtime-1.1.0.jar";"C:\kafka_2.11-1.1.0\libs\connect-transforms-1.1.0.jar";"C:\kafka_2.11-1.1.0\libs\guava-20.0.jar";"C:\kafka_2.11-1.1.0\libs\hk2-api-2.5.0-b32.jar";"C:\kafka_2.11-1.1.0\libs\hk2-locator-2.5.0-b32.jar";"C:\kafka_2.11-1.1.0\libs\hk2-utils-2.5.0-b32.jar";"C:\kafka_2.11-1.1.0\libs\jackson-annotations-2.9.4.jar";"C:\kafka_2.11-1.1.0\libs\jackson-core-2.9.4.jar";"C:\kafka_2.11-1.1.0\libs\jackson-databind-2.9.4.jar";"C:\kafka_2.11-1.1.0\libs\jackson-jaxrs-base-2.9.4.jar";"C:\kafka_2.11-1.1.0\libs\jackson-jaxrs-json-provider-2.9.4.jar";"C:\kafka_2.11-1.1.0\libs\jackson-module-jaxb-annotations-2.9.4.jar";"C:\kafka_2.11-1.1.0\libs\javassist-3.20.0-GA.jar";"C:\kafka_2.11-1.1.0\libs\javassist-3.21.0-GA.jar";"C:\kafka_2.11-1.1.0\libs\javax.annotation-api-1.2.jar";"C:\kafka_2.11-1.1.0\libs\javax.inject-1.jar";"C:\kafka_2.11-1.1.0\libs\javax.inject-2.5.0-b32.jar";"C:\kafka_2.11-1.1.0\libs\javax.servlet-api-3.1.0.jar";"C:\kafka_2.11-1.1.0\libs\javax.ws.rs-api-2.0.1.jar";"C:\kafka_2.11-1.1.0\libs\jersey-client-2.25.1.jar";"C:\kafka_2.11-1.1.0\libs\jersey-common-2.25.1.jar";"C:\kafka_2.11-1.1.0\libs\jersey-container-servlet-2.25.1.jar";"C:\kafka_2.11-1.1.0\libs\jersey-container-servlet-core-2.25.1.jar";"C:\kafka_2.11-1.1.0\libs\jersey-guava-2.25.1.jar";"C:\kafka_2.11-1.1.0\libs\jersey-media-jaxb-2.25.1.jar";"C:\kafka_2.11-1.1.0\libs\jersey-server-2.25.1.jar";"C:\kafka_2.11-1.1.0\libs\jetty-client-9.2.24.v20180105.jar";"C:\kafka_2.11-1.1.0\libs\jetty-continuation-9.2.24.v20180105.jar";"C:\kafka_2.11-1.1.0\libs\jetty-http-9.2.24.v20180105.jar";"C:\kafka_2.11-1.1.0\libs\jetty-io-9.2.24.v20180105.jar";"C:\kafka_2.11-1.1.0\libs\jetty-security-9.2.24.v20180105.jar";"C:\kafka_2.11-1.1.0\libs\jetty-server-9.2.24.v20180105.jar";"C:\kafka_2.11-1.1.0\libs\jetty-servlet-9.2.24.v20180105.jar";"C:\kafka_2.11-1.1.0\libs\jetty-servlets-9.2.24.v20180105.jar";"C:\kafka_2.11-1.1.0\libs\jetty-util-9.2.24.v20180105.jar";"C:\kafka_2.11-1.1.0\libs\jopt-simple-5.0.4.jar";"C:\kafka_2.11-1.1.0\libs\kafka-clients-1.1.0.jar";"C:\kafka_2.11-1.1.0\libs\kafka-log4j-appender-1.1.0.jar";"C:\kafka_2.11-1.1.0\libs\kafka-streams-1.1.0.jar";"C:\kafka_2.11-1.1.0\libs\kafka-streams-examples-1.1.0.jar";"C:\kafka_2.11-1.1.0\libs\kafka-streams-test-utils-1.1.0.jar";"C:\kafka_2.11-1.1.0\libs\kafka-tools-1.1.0.jar";"C:\kafka_2.11-1.1.0\libs\kafka_2.11-1.1.0-javadoc.jar";"C:\kafka_2.11-1.1.0\libs\kafka_2.11-1.1.0-javadoc.jar.asc";"C:\kafka_2.11-1.1.0\libs\kafka_2.11-1.1.0-scaladoc.jar";"C:\kafka_2.11-1.1.0\libs\kafka_2.11-1.1.0-scaladoc.jar.asc";"C:\kafka_2.11-1.1.0\libs\kafka_2.11-1.1.0-sources.jar";"C:\kafka_2.11-1.1.0\libs\kafka_2.11-1.1.0-sources.jar.asc";"C:\kafka_2.11-1.1.0\libs\kafka_2.11-1.1.0-test-sources.jar";"C:\kafka_2.11-1.1.0\libs\kafka_2.11-1.1.0-test-sources.jar.asc";"C:\kafka_2.11-1.1.0\libs\kafka_2.11-1.1.0-test.jar";"C:\kafka_2.11-1.1.0\libs\kafka_2.11-1.1.0-test.jar.asc";"C:\kafka_2.11-1.1.0\libs\kafka_2.11-1.1.0.jar";"C:\kafka_2.11-1.1.0\libs\kafka_2.11-1.1.0.jar.asc";"C:\kafka_2.11-1.1.0\libs\log4j-1.2.17.jar";"C:\kafka_2.11-1.1.0\libs\lz4-java-1.4.jar";"C:\kafka_2.11-1.1.0\libs\maven-artifact-3.5.2.jar";"C:\kafka_2.11-1.1.0\libs\metrics-core-2.2.0.jar";"C:\kafka_2.11-1.1.0\libs\osgi-resource-locator-1.0.1.jar";"C:\kafka_2.11-1.1.0\libs\plexus-utils-3.1.0.jar";"C:\kafka_2.11-1.1.0\libs\reflections-0.9.11.jar";"C:\kafka_2.11-1.1.0\libs\rocksdbjni-5.7.3.jar";"C:\kafka_2.11-1.1.0\libs\scala-library-2.11.12.jar";"C:\kafka_2.11-1.1.0\libs\scala-logging_2.11-3.7.2.jar";"C:\kafka_2.11-1.1.0\libs\scala-reflect-2.11.12.jar";"C:\kafka_2.11-1.1.0\libs\sdb-1.1.0-M3-2.11.jar";"C:\kafka_2.11-1.1.0\libs\slf4j-api-1.7.25.jar";"C:\kafka_2.11-1.1.0\libs\slf4j-log4j12-1.7.25.jar";"C:\kafka_2.11-1.1.0\libs\snappy-java-1.1.7.1.jar";"C:\kafka_2.11-1.1.0\libs\validation-api-1.1.0.Final.jar";"C:\kafka_2.11-1.1.0\libs\zkclient-0.10.jar";"C:\kafka_2.11-1.1.0\libs\zookeeper-3.4.10.jar" kafka.Kafka ..\..\config\server.properties [2018-05-15 04:05:21,011] INFO Registered kafka:type=kafka.Log4jController MBean (kafka.utils.Log4jControllerRegistration$) [2018-05-15 04:05:21,451] INFO starting (kafka.server.KafkaServer) [2018-05-15 04:05:21,452] INFO Connecting to zookeeper on localhost:2181 (kafka.server.KafkaServer) [2018-05-15 04:05:21,469] INFO [ZooKeeperClient] Initializing a new session to localhost:2181. (kafka.zookeeper.ZooKeeperClient) [2018-05-15 04:05:21,496] INFO Client environment:zookeeper.version=3.4.10-39d3a4f269333c922ed3db283be479f9deacaa0f, built on 03/23/2017 10:13 GMT (org.apache.zookeeper.ZooKeeper) [2018-05-15 04:05:21,496] INFO Client environment:host.name=mmpc.Home (org.apache.zookeeper.ZooKeeper) [2018-05-15 04:05:21,496] INFO Client environment:java.version=1.8.0_141 (org.apache.zookeeper.ZooKeeper) [2018-05-15 04:05:21,496] INFO Client environment:java.vendor=Oracle Corporation (org.apache.zookeeper.ZooKeeper) [2018-05-15 04:05:21,496] INFO Client environment:java.home=C:\jdk1.8.0\jre (org.apache.zookeeper.ZooKeeper) [2018-05-15 04:05:21,496] INFO Client environment:java.class.path=C:\kafka_2.11-1.1.0\libs\aopalliance-repackaged-2.5.0-b32.jar;C:\kafka_2.11-1.1.0\libs\argparse4j-0.7.0.jar;C:\kafka_2.11-1.1.0\libs\commons-lang3-3.5.jar;C:\kafka_2.11-1.1.0\libs\connect-api-1.1.0.jar;C:\kafka_2.11-1.1.0\libs\connect-file-1.1.0.jar;C:\kafka_2.11-1.1.0\libs\connect-json-1.1.0.jar;C:\kafka_2.11-1.1.0\libs\connect-runtime-1.1.0.jar;C:\kafka_2.11-1.1.0\libs\connect-transforms-1.1.0.jar;C:\kafka_2.11-1.1.0\libs\guava-20.0.jar;C:\kafka_2.11-1.1.0\libs\hk2-api-2.5.0-b32.jar;C:\kafka_2.11-1.1.0\libs\hk2-locator-2.5.0-b32.jar;C:\kafka_2.11-1.1.0\libs\hk2-utils-2.5.0-b32.jar;C:\kafka_2.11-1.1.0\libs\jackson-annotations-2.9.4.jar;C:\kafka_2.11-1.1.0\libs\jackson-core-2.9.4.jar;C:\kafka_2.11-1.1.0\libs\jackson-databind-2.9.4.jar;C:\kafka_2.11-1.1.0\libs\jackson-jaxrs-base-2.9.4.jar;C:\kafka_2.11-1.1.0\libs\jackson-jaxrs-json-provider-2.9.4.jar;C:\kafka_2.11-1.1.0\libs\jackson-module-jaxb-annotations-2.9.4.jar;C:\kafka_2.11-1.1.0\libs\javassist-3.20.0-GA.jar;C:\kafka_2.11-1.1.0\libs\javassist-3.21.0-GA.jar;C:\kafka_2.11-1.1.0\libs\javax.annotation-api-1.2.jar;C:\kafka_2.11-1.1.0\libs\javax.inject-1.jar;C:\kafka_2.11-1.1.0\libs\javax.inject-2.5.0-b32.jar;C:\kafka_2.11-1.1.0\libs\javax.servlet-api-3.1.0.jar;C:\kafka_2.11-1.1.0\libs\javax.ws.rs-api-2.0.1.jar;C:\kafka_2.11-1.1.0\libs\jersey-client-2.25.1.jar;C:\kafka_2.11-1.1.0\libs\jersey-common-2.25.1.jar;C:\kafka_2.11-1.1.0\libs\jersey-container-servlet-2.25.1.jar;C:\kafka_2.11-1.1.0\libs\jersey-container-servlet-core-2.25.1.jar;C:\kafka_2.11-1.1.0\libs\jersey-guava-2.25.1.jar;C:\kafka_2.11-1.1.0\libs\jersey-media-jaxb-2.25.1.jar;C:\kafka_2.11-1.1.0\libs\jersey-server-2.25.1.jar;C:\kafka_2.11-1.1.0\libs\jetty-client-9.2.24.v20180105.jar;C:\kafka_2.11-1.1.0\libs\jetty-continuation-9.2.24.v20180105.jar;C:\kafka_2.11-1.1.0\libs\jetty-http-9.2.24.v20180105.jar;C:\kafka_2.11-1.1.0\libs\jetty-io-9.2.24.v20180105.jar;C:\kafka_2.11-1.1.0\libs\jetty-security-9.2.24.v20180105.jar;C:\kafka_2.11-1.1.0\libs\jetty-server-9.2.24.v20180105.jar;C:\kafka_2.11-1.1.0\libs\jetty-servlet-9.2.24.v20180105.jar;C:\kafka_2.11-1.1.0\libs\jetty-servlets-9.2.24.v20180105.jar;C:\kafka_2.11-1.1.0\libs\jetty-util-9.2.24.v20180105.jar;C:\kafka_2.11-1.1.0\libs\jopt-simple-5.0.4.jar;C:\kafka_2.11-1.1.0\libs\kafka-clients-1.1.0.jar;C:\kafka_2.11-1.1.0\libs\kafka-log4j-appender-1.1.0.jar;C:\kafka_2.11-1.1.0\libs\kafka-streams-1.1.0.jar;C:\kafka_2.11-1.1.0\libs\kafka-streams-examples-1.1.0.jar;C:\kafka_2.11-1.1.0\libs\kafka-streams-test-utils-1.1.0.jar;C:\kafka_2.11-1.1.0\libs\kafka-tools-1.1.0.jar;C:\kafka_2.11-1.1.0\libs\kafka_2.11-1.1.0-javadoc.jar;C:\kafka_2.11-1.1.0\libs\kafka_2.11-1.1.0-javadoc.jar.asc;C:\kafka_2.11-1.1.0\libs\kafka_2.11-1.1.0-scaladoc.jar;C:\kafka_2.11-1.1.0\libs\kafka_2.11-1.1.0-scaladoc.jar.asc;C:\kafka_2.11-1.1.0\libs\kafka_2.11-1.1.0-sources.jar;C:\kafka_2.11-1.1.0\libs\kafka_2.11-1.1.0-sources.jar.asc;C:\kafka_2.11-1.1.0\libs\kafka_2.11-1.1.0-test-sources.jar;C:\kafka_2.11-1.1.0\libs\kafka_2.11-1.1.0-test-sources.jar.asc;C:\kafka_2.11-1.1.0\libs\kafka_2.11-1.1.0-test.jar;C:\kafka_2.11-1.1.0\libs\kafka_2.11-1.1.0-test.jar.asc;C:\kafka_2.11-1.1.0\libs\kafka_2.11-1.1.0.jar;C:\kafka_2.11-1.1.0\libs\kafka_2.11-1.1.0.jar.asc;C:\kafka_2.11-1.1.0\libs\log4j-1.2.17.jar;C:\kafka_2.11-1.1.0\libs\lz4-java-1.4.jar;C:\kafka_2.11-1.1.0\libs\maven-artifact-3.5.2.jar;C:\kafka_2.11-1.1.0\libs\metrics-core-2.2.0.jar;C:\kafka_2.11-1.1.0\libs\osgi-resource-locator-1.0.1.jar;C:\kafka_2.11-1.1.0\libs\plexus-utils-3.1.0.jar;C:\kafka_2.11-1.1.0\libs\reflections-0.9.11.jar;C:\kafka_2.11-1.1.0\libs\rocksdbjni-5.7.3.jar;C:\kafka_2.11-1.1.0\libs\scala-library-2.11.12.jar;C:\kafka_2.11-1.1.0\libs\scala-logging_2.11-3.7.2.jar;C:\kafka_2.11-1.1.0\libs\scala-reflect-2.11.12.jar;C:\kafka_2.11-1.1.0\libs\sdb-1.1.0-M3-2.11.jar;C:\kafka_2.11-1.1.0\libs\slf4j-api-1.7.25.jar;C:\kafka_2.11-1.1.0\libs\slf4j-log4j12-1.7.25.jar;C:\kafka_2.11-1.1.0\libs\snappy-java-1.1.7.1.jar;C:\kafka_2.11-1.1.0\libs\validation-api-1.1.0.Final.jar;C:\kafka_2.11-1.1.0\libs\zkclient-0.10.jar;C:\kafka_2.11-1.1.0\libs\zookeeper-3.4.10.jar (org.apache.zookeeper.ZooKeeper) [2018-05-15 04:05:21,497] INFO Client environment:java.library.path=C:\jdk1.8.0\bin;C:\WINDOWS\Sun\Java\bin;C:\WINDOWS\system32;C:\WINDOWS;C:\Program Files (x86)\Common Files\Oracle\Java\javapath;C:\apache-maven-3.0.5\bin;C:\Python34\;C:\Python34\Scripts;C:\Program Files (x86)\NVIDIA Corporation\PhysX\Common;C:\Python33\;C:\Program Files\Common Files\Microsoft Shared\Windows Live;C:\Program Files (x86)\Common Files\Microsoft Shared\Windows Live;C:\WINDOWS\system32;C:\WINDOWS;C:\WINDOWS\System32\Wbem;C:\WINDOWS\System32\WindowsPowerShell\v1.0\;C:\Program Files\Lenovo\Bluetooth Software\;C:\Program Files\Lenovo\Bluetooth Software\syswow64;C:\Program Files (x86)\EgisTec BioExcess\x64;C:\Program Files (x86)\EgisTec BioExcess\;C:\Program Files (x86)\EgisTec Port Locker\x64;C:\Program Files (x86)\EgisTec Port Locker\;C:\Program Files (x86)\Windows Live\Shared;C:\MinGW\bin;c:\Program Files (x86)\Microsoft SQL Server\100\Tools\Binn\;c:\Program Files\Microsoft SQL Server\100\Tools\Binn\;c:\Program Files\Microsoft SQL Server\100\DTS\Binn\;%JUNIT_HOME%\;C:\jdk1.8.0\\bin;c:\gradle-2.14\bin;C:\Program Files\TortoiseSVN\bin;C:\Program Files (x86)\Microsoft SQL Server\130\Tools\Binn\;C:\Program Files\Microsoft SQL Server\130\Tools\Binn\;C:\Program Files (x86)\Microsoft SQL Server\130\DTS\Binn\;C:\Program Files\Microsoft SQL Server\130\DTS\Binn\;C:\Program Files\Microsoft SQL Server\Client SDK\ODBC\130\Tools\Binn\;C:\Program Files (x86)\Microsoft SQL Server\Client SDK\ODBC\130\Tools\Binn\;C:\Program Files (x86)\Microsoft SQL Server\140\Tools\Binn\;C:\Program Files (x86)\Microsoft SQL Server\140\DTS\Binn\;C:\Program Files (x86)\Microsoft SQL Server\140\Tools\Binn\ManagementStudio\;C:\Program Files\PuTTY\;C:\OpenSSL-Win64\bin;C:\Program Files\TortoiseGit\bin;C:\Program Files (x86)\Windows Kits\10\Windows Performance Toolkit\;C:\Users\manna\AppData\Local\Microsoft\WindowsApps;;C:\Program Files\Microsoft VS Code Insiders\bin;. (org.apache.zookeeper.ZooKeeper) [2018-05-15 04:05:21,497] INFO Client environment:java.io.tmpdir=C:\Users\manna\AppData\Local\Temp\ (org.apache.zookeeper.ZooKeeper) [2018-05-15 04:05:21,498] INFO Client environment:java.compiler= (org.apache.zookeeper.ZooKeeper) [2018-05-15 04:05:21,498] INFO Client environment:os.name=Windows 10 (org.apache.zookeeper.ZooKeeper) [2018-05-15 04:05:21,498] INFO Client environment:os.arch=amd64 (org.apache.zookeeper.ZooKeeper) [2018-05-15 04:05:21,498] INFO Client environment:os.version=10.0 (org.apache.zookeeper.ZooKeeper) [2018-05-15 04:05:21,498] INFO Client environment:user.name=manna (org.apache.zookeeper.ZooKeeper) [2018-05-15 04:05:21,498] INFO Client environment:user.home=C:\Users\manna (org.apache.zookeeper.ZooKeeper) [2018-05-15 04:05:21,498] INFO Client environment:user.dir=C:\kafka_2.11-1.1.0\bin\windows (org.apache.zookeeper.ZooKeeper) [2018-05-15 04:05:21,501] INFO Initiating client connection, connectString=localhost:2181 sessionTimeout=6000 watcher=kafka.zookeeper.ZooKeeperClient$ZooKeeperClientWatcher$@7e07db1f (org.apache.zookeeper.ZooKeeper) [2018-05-15 04:05:21,525] DEBUG Initializing task scheduler. (kafka.utils.KafkaScheduler) [2018-05-15 04:05:21,526] INFO [ZooKeeperClient] Waiting until connected. (kafka.zookeeper.ZooKeeperClient) [2018-05-15 04:05:21,528] INFO Opening socket connection to server 0:0:0:0:0:0:0:1/0:0:0:0:0:0:0:1:2181. Will not attempt to authenticate using SASL (unknown error) (org.apache.zookeeper.ClientCnxn) [2018-05-15 04:05:21,530] INFO Socket connection established to 0:0:0:0:0:0:0:1/0:0:0:0:0:0:0:1:2181, initiating session (org.apache.zookeeper.ClientCnxn) [2018-05-15 04:05:21,542] INFO Session establishment complete on server 0:0:0:0:0:0:0:1/0:0:0:0:0:0:0:1:2181, sessionid = 0x16360b537e30016, negotiated timeout = 6000 (org.apache.zookeeper.ClientCnxn) [2018-05-15 04:05:21,545] INFO [ZooKeeperClient] Connected. (kafka.zookeeper.ZooKeeperClient) [2018-05-15 04:05:21,883] INFO Cluster ID = KFCus-CsTRa0p-wX7VDlQQ (kafka.server.KafkaServer) [2018-05-15 04:05:21,958] INFO KafkaConfig values: advertised.host.name = null advertised.listeners = PLAINTEXT://localhost:9092 advertised.port = null alter.config.policy.class.name = null alter.log.dirs.replication.quota.window.num = 11 alter.log.dirs.replication.quota.window.size.seconds = 1 authorizer.class.name = auto.create.topics.enable = true auto.leader.rebalance.enable = true background.threads = 10 broker.id = 1 broker.id.generation.enable = true broker.rack = null compression.type = producer connections.max.idle.ms = 600000 controlled.shutdown.enable = true controlled.shutdown.max.retries = 3 controlled.shutdown.retry.backoff.ms = 5000 controller.socket.timeout.ms = 30000 create.topic.policy.class.name = null default.replication.factor = 1 delegation.token.expiry.check.interval.ms = 3600000 delegation.token.expiry.time.ms = 86400000 delegation.token.master.key = null delegation.token.max.lifetime.ms = 604800000 delete.records.purgatory.purge.interval.requests = 1 delete.topic.enable = true fetch.purgatory.purge.interval.requests = 1000 group.initial.rebalance.delay.ms = 2000 group.max.session.timeout.ms = 300000 group.min.session.timeout.ms = 6000 host.name = inter.broker.listener.name = null inter.broker.protocol.version = 1.1-IV0 leader.imbalance.check.interval.seconds = 300 leader.imbalance.per.broker.percentage = 10 listener.security.protocol.map = PLAINTEXT:PLAINTEXT,SSL:SSL,SASL_PLAINTEXT:SASL_PLAINTEXT,SASL_SSL:SASL_SSL listeners = null log.cleaner.backoff.ms = 15000 log.cleaner.dedupe.buffer.size = 134217728 log.cleaner.delete.retention.ms = 86400000 log.cleaner.enable = true log.cleaner.io.buffer.load.factor = 0.9 log.cleaner.io.buffer.size = 524288 log.cleaner.io.max.bytes.per.second = 1.7976931348623157E308 log.cleaner.min.cleanable.ratio = 0.5 log.cleaner.min.compaction.lag.ms = 0 log.cleaner.threads = 1 log.cleanup.policy = [delete] log.dir = /tmp/kafka-logs log.dirs = /kafka1 log.flush.interval.messages = 9223372036854775807 log.flush.interval.ms = null log.flush.offset.checkpoint.interval.ms = 60000 log.flush.scheduler.interval.ms = 9223372036854775807 log.flush.start.offset.checkpoint.interval.ms = 60000 log.index.interval.bytes = 4096 log.index.size.max.bytes = 10485760 log.message.format.version = 1.1-IV0 log.message.timestamp.difference.max.ms = 9223372036854775807 log.message.timestamp.type = CreateTime log.preallocate = false log.retention.bytes = 12485760 log.retention.check.interval.ms = 300000 log.retention.hours = 1 log.retention.minutes = 12 log.retention.ms = null log.roll.hours = 1 log.roll.jitter.hours = 0 log.roll.jitter.ms = null log.roll.ms = null log.segment.bytes = 10485760 log.segment.delete.delay.ms = 60000 max.connections.per.ip = 2147483647 max.connections.per.ip.overrides = max.incremental.fetch.session.cache.slots = 1000 message.max.bytes = 1000012 metric.reporters = [] metrics.num.samples = 2 metrics.recording.level = INFO metrics.sample.window.ms = 30000 min.insync.replicas = 1 num.io.threads = 8 num.network.threads = 3 num.partitions = 1 num.recovery.threads.per.data.dir = 3 num.replica.alter.log.dirs.threads = null num.replica.fetchers = 1 offset.metadata.max.bytes = 4096 offsets.commit.required.acks = -1 offsets.commit.timeout.ms = 5000 offsets.load.buffer.size = 5242880 offsets.retention.check.interval.ms = 600000 offsets.retention.minutes = 16 offsets.topic.compression.codec = 0 offsets.topic.num.partitions = 50 offsets.topic.replication.factor = 1 offsets.topic.segment.bytes = 104857600 password.encoder.cipher.algorithm = AES/CBC/PKCS5Padding password.encoder.iterations = 4096 password.encoder.key.length = 128 password.encoder.keyfactory.algorithm = null password.encoder.old.secret = null password.encoder.secret = null port = 9092 principal.builder.class = null producer.purgatory.purge.interval.requests = 1000 queued.max.request.bytes = -1 queued.max.requests = 500 quota.consumer.default = 9223372036854775807 quota.producer.default = 9223372036854775807 quota.window.num = 11 quota.window.size.seconds = 1 replica.fetch.backoff.ms = 1000 replica.fetch.max.bytes = 1048576 replica.fetch.min.bytes = 1 replica.fetch.response.max.bytes = 10485760 replica.fetch.wait.max.ms = 500 replica.high.watermark.checkpoint.interval.ms = 5000 replica.lag.time.max.ms = 10000 replica.socket.receive.buffer.bytes = 65536 replica.socket.timeout.ms = 30000 replication.quota.window.num = 11 replication.quota.window.size.seconds = 1 request.timeout.ms = 30000 reserved.broker.max.id = 1000 sasl.enabled.mechanisms = [GSSAPI] sasl.jaas.config = null sasl.kerberos.kinit.cmd = /usr/bin/kinit sasl.kerberos.min.time.before.relogin = 60000 sasl.kerberos.principal.to.local.rules = [DEFAULT] sasl.kerberos.service.name = null sasl.kerberos.ticket.renew.jitter = 0.05 sasl.kerberos.ticket.renew.window.factor = 0.8 sasl.mechanism.inter.broker.protocol = GSSAPI security.inter.broker.protocol = PLAINTEXT socket.receive.buffer.bytes = 102400 socket.request.max.bytes = 104857600 socket.send.buffer.bytes = 102400 ssl.cipher.suites = [] ssl.client.auth = none ssl.enabled.protocols = [TLSv1.2, TLSv1.1, TLSv1] ssl.endpoint.identification.algorithm = null ssl.key.password = null ssl.keymanager.algorithm = SunX509 ssl.keystore.location = null ssl.keystore.password = null ssl.keystore.type = JKS ssl.protocol = TLS ssl.provider = null ssl.secure.random.implementation = null ssl.trustmanager.algorithm = PKIX ssl.truststore.location = null ssl.truststore.password = null ssl.truststore.type = JKS transaction.abort.timed.out.transaction.cleanup.interval.ms = 60000 transaction.max.timeout.ms = 900000 transaction.remove.expired.transaction.cleanup.interval.ms = 3600000 transaction.state.log.load.buffer.size = 5242880 transaction.state.log.min.isr = 1 transaction.state.log.num.partitions = 50 transaction.state.log.replication.factor = 1 transaction.state.log.segment.bytes = 104857600 transactional.id.expiration.ms = 604800000 unclean.leader.election.enable = false zookeeper.connect = localhost:2181 zookeeper.connection.timeout.ms = 10000 zookeeper.max.in.flight.requests = 10 zookeeper.session.timeout.ms = 6000 zookeeper.set.acl = false zookeeper.sync.time.ms = 2000 (kafka.server.KafkaConfig) [2018-05-15 04:05:21,968] INFO KafkaConfig values: advertised.host.name = null advertised.listeners = PLAINTEXT://localhost:9092 advertised.port = null alter.config.policy.class.name = null alter.log.dirs.replication.quota.window.num = 11 alter.log.dirs.replication.quota.window.size.seconds = 1 authorizer.class.name = auto.create.topics.enable = true auto.leader.rebalance.enable = true background.threads = 10 broker.id = 1 broker.id.generation.enable = true broker.rack = null compression.type = producer connections.max.idle.ms = 600000 controlled.shutdown.enable = true controlled.shutdown.max.retries = 3 controlled.shutdown.retry.backoff.ms = 5000 controller.socket.timeout.ms = 30000 create.topic.policy.class.name = null default.replication.factor = 1 delegation.token.expiry.check.interval.ms = 3600000 delegation.token.expiry.time.ms = 86400000 delegation.token.master.key = null delegation.token.max.lifetime.ms = 604800000 delete.records.purgatory.purge.interval.requests = 1 delete.topic.enable = true fetch.purgatory.purge.interval.requests = 1000 group.initial.rebalance.delay.ms = 2000 group.max.session.timeout.ms = 300000 group.min.session.timeout.ms = 6000 host.name = inter.broker.listener.name = null inter.broker.protocol.version = 1.1-IV0 leader.imbalance.check.interval.seconds = 300 leader.imbalance.per.broker.percentage = 10 listener.security.protocol.map = PLAINTEXT:PLAINTEXT,SSL:SSL,SASL_PLAINTEXT:SASL_PLAINTEXT,SASL_SSL:SASL_SSL listeners = null log.cleaner.backoff.ms = 15000 log.cleaner.dedupe.buffer.size = 134217728 log.cleaner.delete.retention.ms = 86400000 log.cleaner.enable = true log.cleaner.io.buffer.load.factor = 0.9 log.cleaner.io.buffer.size = 524288 log.cleaner.io.max.bytes.per.second = 1.7976931348623157E308 log.cleaner.min.cleanable.ratio = 0.5 log.cleaner.min.compaction.lag.ms = 0 log.cleaner.threads = 1 log.cleanup.policy = [delete] log.dir = /tmp/kafka-logs log.dirs = /kafka1 log.flush.interval.messages = 9223372036854775807 log.flush.interval.ms = null log.flush.offset.checkpoint.interval.ms = 60000 log.flush.scheduler.interval.ms = 9223372036854775807 log.flush.start.offset.checkpoint.interval.ms = 60000 log.index.interval.bytes = 4096 log.index.size.max.bytes = 10485760 log.message.format.version = 1.1-IV0 log.message.timestamp.difference.max.ms = 9223372036854775807 log.message.timestamp.type = CreateTime log.preallocate = false log.retention.bytes = 12485760 log.retention.check.interval.ms = 300000 log.retention.hours = 1 log.retention.minutes = 12 log.retention.ms = null log.roll.hours = 1 log.roll.jitter.hours = 0 log.roll.jitter.ms = null log.roll.ms = null log.segment.bytes = 10485760 log.segment.delete.delay.ms = 60000 max.connections.per.ip = 2147483647 max.connections.per.ip.overrides = max.incremental.fetch.session.cache.slots = 1000 message.max.bytes = 1000012 metric.reporters = [] metrics.num.samples = 2 metrics.recording.level = INFO metrics.sample.window.ms = 30000 min.insync.replicas = 1 num.io.threads = 8 num.network.threads = 3 num.partitions = 1 num.recovery.threads.per.data.dir = 3 num.replica.alter.log.dirs.threads = null num.replica.fetchers = 1 offset.metadata.max.bytes = 4096 offsets.commit.required.acks = -1 offsets.commit.timeout.ms = 5000 offsets.load.buffer.size = 5242880 offsets.retention.check.interval.ms = 600000 offsets.retention.minutes = 16 offsets.topic.compression.codec = 0 offsets.topic.num.partitions = 50 offsets.topic.replication.factor = 1 offsets.topic.segment.bytes = 104857600 password.encoder.cipher.algorithm = AES/CBC/PKCS5Padding password.encoder.iterations = 4096 password.encoder.key.length = 128 password.encoder.keyfactory.algorithm = null password.encoder.old.secret = null password.encoder.secret = null port = 9092 principal.builder.class = null producer.purgatory.purge.interval.requests = 1000 queued.max.request.bytes = -1 queued.max.requests = 500 quota.consumer.default = 9223372036854775807 quota.producer.default = 9223372036854775807 quota.window.num = 11 quota.window.size.seconds = 1 replica.fetch.backoff.ms = 1000 replica.fetch.max.bytes = 1048576 replica.fetch.min.bytes = 1 replica.fetch.response.max.bytes = 10485760 replica.fetch.wait.max.ms = 500 replica.high.watermark.checkpoint.interval.ms = 5000 replica.lag.time.max.ms = 10000 replica.socket.receive.buffer.bytes = 65536 replica.socket.timeout.ms = 30000 replication.quota.window.num = 11 replication.quota.window.size.seconds = 1 request.timeout.ms = 30000 reserved.broker.max.id = 1000 sasl.enabled.mechanisms = [GSSAPI] sasl.jaas.config = null sasl.kerberos.kinit.cmd = /usr/bin/kinit sasl.kerberos.min.time.before.relogin = 60000 sasl.kerberos.principal.to.local.rules = [DEFAULT] sasl.kerberos.service.name = null sasl.kerberos.ticket.renew.jitter = 0.05 sasl.kerberos.ticket.renew.window.factor = 0.8 sasl.mechanism.inter.broker.protocol = GSSAPI security.inter.broker.protocol = PLAINTEXT socket.receive.buffer.bytes = 102400 socket.request.max.bytes = 104857600 socket.send.buffer.bytes = 102400 ssl.cipher.suites = [] ssl.client.auth = none ssl.enabled.protocols = [TLSv1.2, TLSv1.1, TLSv1] ssl.endpoint.identification.algorithm = null ssl.key.password = null ssl.keymanager.algorithm = SunX509 ssl.keystore.location = null ssl.keystore.password = null ssl.keystore.type = JKS ssl.protocol = TLS ssl.provider = null ssl.secure.random.implementation = null ssl.trustmanager.algorithm = PKIX ssl.truststore.location = null ssl.truststore.password = null ssl.truststore.type = JKS transaction.abort.timed.out.transaction.cleanup.interval.ms = 60000 transaction.max.timeout.ms = 900000 transaction.remove.expired.transaction.cleanup.interval.ms = 3600000 transaction.state.log.load.buffer.size = 5242880 transaction.state.log.min.isr = 1 transaction.state.log.num.partitions = 50 transaction.state.log.replication.factor = 1 transaction.state.log.segment.bytes = 104857600 transactional.id.expiration.ms = 604800000 unclean.leader.election.enable = false zookeeper.connect = localhost:2181 zookeeper.connection.timeout.ms = 10000 zookeeper.max.in.flight.requests = 10 zookeeper.session.timeout.ms = 6000 zookeeper.set.acl = false zookeeper.sync.time.ms = 2000 (kafka.server.KafkaConfig) [2018-05-15 04:05:21,974] DEBUG Initializing task scheduler. (kafka.utils.KafkaScheduler) [2018-05-15 04:05:21,997] INFO [ThrottledRequestReaper-Fetch]: Starting (kafka.server.ClientQuotaManager$ThrottledRequestReaper) [2018-05-15 04:05:21,998] INFO [ThrottledRequestReaper-Produce]: Starting (kafka.server.ClientQuotaManager$ThrottledRequestReaper) [2018-05-15 04:05:22,000] INFO [ThrottledRequestReaper-Request]: Starting (kafka.server.ClientQuotaManager$ThrottledRequestReaper) [2018-05-15 04:05:22,045] INFO Loading logs. (kafka.log.LogManager) [2018-05-15 04:05:22,079] DEBUG Loading log 'test1-0' (kafka.log.LogManager) [2018-05-15 04:05:22,079] DEBUG Loading log 'test-0' (kafka.log.LogManager) [2018-05-15 04:05:22,079] DEBUG Loading log '__consumer_offsets-0' (kafka.log.LogManager) (&*(&*(&)(&()*&)(Log Segment Opened by.. (&*(&*(&)(&()*&)(Log Segment Opened by.. (&*(&*(&)(&()*&)(Log Segment Opened by.. java.lang.Thread.getStackTrace(Thread.java:1559) java.lang.Thread.getStackTrace(Thread.java:1559) java.lang.Thread.getStackTrace(Thread.java:1559) kafka.log.LogSegment$.open(LogSegment.scala:560) kafka.log.LogSegment$.open(LogSegment.scala:560) kafka.log.Log$$anonfun$loadSegmentFiles$3.apply(Log.scala:333) kafka.log.LogSegment$.open(LogSegment.scala:560) kafka.log.Log$$anonfun$loadSegmentFiles$3.apply(Log.scala:320) kafka.log.Log$$anonfun$loadSegmentFiles$3.apply(Log.scala:333) scala.collection.TraversableLike$WithFilter$$anonfun$foreach$1.apply(TraversableLike.scala:733) kafka.log.Log$$anonfun$loadSegmentFiles$3.apply(Log.scala:333) scala.collection.IndexedSeqOptimized$class.foreach(IndexedSeqOptimized.scala:33) kafka.log.Log$$anonfun$loadSegmentFiles$3.apply(Log.scala:320) scala.collection.mutable.ArrayOps$ofRef.foreach(ArrayOps.scala:186) kafka.log.Log$$anonfun$loadSegmentFiles$3.apply(Log.scala:320) scala.collection.TraversableLike$WithFilter.foreach(TraversableLike.scala:732) scala.collection.TraversableLike$WithFilter$$anonfun$foreach$1.apply(TraversableLike.scala:733) kafka.log.Log.loadSegmentFiles(Log.scala:320) scala.collection.TraversableLike$WithFilter$$anonfun$foreach$1.apply(TraversableLike.scala:733) kafka.log.Log.loadSegments(Log.scala:403) scala.collection.IndexedSeqOptimized$class.foreach(IndexedSeqOptimized.scala:33) kafka.log.Log.(Log.scala:216) scala.collection.IndexedSeqOptimized$class.foreach(IndexedSeqOptimized.scala:33) kafka.log.Log$.apply(Log.scala:1747) scala.collection.mutable.ArrayOps$ofRef.foreach(ArrayOps.scala:186) kafka.log.LogManager.kafka$log$LogManager$$loadLog(LogManager.scala:255) scala.collection.mutable.ArrayOps$ofRef.foreach(ArrayOps.scala:186) kafka.log.LogManager$$anonfun$loadLogs$2$$anonfun$11$$anonfun$apply$15$$anonfun$apply$2.apply$mcV$sp(LogManager.scala:335) scala.collection.TraversableLike$WithFilter.foreach(TraversableLike.scala:732) kafka.utils.CoreUtils$$anon$1.run(CoreUtils.scala:62) scala.collection.TraversableLike$WithFilter.foreach(TraversableLike.scala:732) java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:511) kafka.log.Log.loadSegmentFiles(Log.scala:320) java.util.concurrent.FutureTask.run(FutureTask.java:266) kafka.log.Log.loadSegmentFiles(Log.scala:320) java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149) kafka.log.Log.loadSegments(Log.scala:403) java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624) kafka.log.Log.loadSegments(Log.scala:403) java.lang.Thread.run(Thread.java:748) kafka.log.Log.(Log.scala:216) C:\kafka1\test-0 named files kafka.log.Log.(Log.scala:216) kafka.log.Log$.apply(Log.scala:1747) kafka.log.Log$.apply(Log.scala:1747) kafka.log.LogManager.kafka$log$LogManager$$loadLog(LogManager.scala:255) kafka.log.LogManager.kafka$log$LogManager$$loadLog(LogManager.scala:255) kafka.log.LogManager$$anonfun$loadLogs$2$$anonfun$11$$anonfun$apply$15$$anonfun$apply$2.apply$mcV$sp(LogManager.scala:335) kafka.log.LogManager$$anonfun$loadLogs$2$$anonfun$11$$anonfun$apply$15$$anonfun$apply$2.apply$mcV$sp(LogManager.scala:335) kafka.utils.CoreUtils$$anon$1.run(CoreUtils.scala:62) kafka.utils.CoreUtils$$anon$1.run(CoreUtils.scala:62) java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:511) java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:511) java.util.concurrent.FutureTask.run(FutureTask.java:266) java.util.concurrent.FutureTask.run(FutureTask.java:266) java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149) java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624) java.lang.Thread.run(Thread.java:748) java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149) C:\kafka1\test1-0 named files java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624) java.lang.Thread.run(Thread.java:748) C:\kafka1\__consumer_offsets-0 named files (&*(&*(&)(&()*&)(Log Segment Opened by.. [2018-05-15 04:05:22,151] WARN [Log partition=__consumer_offsets-0, dir=C:\kafka1] Found a corrupted index file corresponding to log file C:\kafka1\__consumer_offsets-0\00000000000000000000.log due to Corrupt index found, index file (C:\kafka1\__consumer_offsets-0\00000000000000000000.index) has non-zero size but the last offset is 0 which is no greater than the base offset 0.}, recovering segment and rebuilding index files... (kafka.log.Log) java.lang.Thread.getStackTrace(Thread.java:1559) [2018-05-15 04:05:22,151] WARN [Log partition=test1-0, dir=C:\kafka1] Found a corrupted index file corresponding to log file C:\kafka1\test1-0\00000000000000000000.log due to Corrupt index found, index file (C:\kafka1\test1-0\00000000000000000000.index) has non-zero size but the last offset is 0 which is no greater than the base offset 0.}, recovering segment and rebuilding index files... (kafka.log.Log) kafka.log.LogSegment$.open(LogSegment.scala:560) kafka.log.Log$$anonfun$loadSegmentFiles$3.apply(Log.scala:333) kafka.log.Log$$anonfun$loadSegmentFiles$3.apply(Log.scala:320) scala.collection.TraversableLike$WithFilter$$anonfun$foreach$1.apply(TraversableLike.scala:733) scala.collection.IndexedSeqOptimized$class.foreach(IndexedSeqOptimized.scala:33) scala.collection.mutable.ArrayOps$ofRef.foreach(ArrayOps.scala:186) scala.collection.TraversableLike$WithFilter.foreach(TraversableLike.scala:732) kafka.log.Log.loadSegmentFiles(Log.scala:320) kafka.log.Log.loadSegments(Log.scala:403) kafka.log.Log.(Log.scala:216) kafka.log.Log$.apply(Log.scala:1747) kafka.log.LogManager.kafka$log$LogManager$$loadLog(LogManager.scala:255) kafka.log.LogManager$$anonfun$loadLogs$2$$anonfun$11$$anonfun$apply$15$$anonfun$apply$2.apply$mcV$sp(LogManager.scala:335) kafka.utils.CoreUtils$$anon$1.run(CoreUtils.scala:62) java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:511) java.util.concurrent.FutureTask.run(FutureTask.java:266) java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149) java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624) java.lang.Thread.run(Thread.java:748) C:\kafka1\test-0 named files [2018-05-15 04:05:22,177] WARN [Log partition=test-0, dir=C:\kafka1] Found a corrupted index file corresponding to log file C:\kafka1\test-0\00000000000000000045.log due to Corrupt index found, index file (C:\kafka1\test-0\00000000000000000045.index) has non-zero size but the last offset is 45 which is no greater than the base offset 45.}, recovering segment and rebuilding index files... (kafka.log.Log) [2018-05-15 04:05:22,190] INFO [ProducerStateManager partition=test-0] Loading producer state from snapshot file 'C:\kafka1\test-0\00000000000000000045.snapshot' (kafka.log.ProducerStateManager) [2018-05-15 04:05:22,228] INFO [Log partition=test1-0, dir=C:\kafka1] Recovering unflushed segment 0 (kafka.log.Log) [2018-05-15 04:05:22,228] INFO [Log partition=__consumer_offsets-0, dir=C:\kafka1] Recovering unflushed segment 0 (kafka.log.Log) [2018-05-15 04:05:22,238] INFO [Log partition=test-0, dir=C:\kafka1] Recovering unflushed segment 45 (kafka.log.Log) [2018-05-15 04:05:22,249] INFO [ProducerStateManager partition=test-0] Loading producer state from snapshot file 'C:\kafka1\test-0\00000000000000000045.snapshot' (kafka.log.ProducerStateManager) [2018-05-15 04:05:22,272] INFO [Log partition=__consumer_offsets-0, dir=C:\kafka1] Loading producer state from offset 0 with message format version 2 (kafka.log.Log) [2018-05-15 04:05:22,272] INFO [Log partition=test1-0, dir=C:\kafka1] Loading producer state from offset 0 with message format version 2 (kafka.log.Log) [2018-05-15 04:05:22,281] INFO [Log partition=test-0, dir=C:\kafka1] Loading producer state from offset 45 with message format version 2 (kafka.log.Log) [2018-05-15 04:05:22,289] INFO [Log partition=__consumer_offsets-0, dir=C:\kafka1] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 191 ms (kafka.log.Log) [2018-05-15 04:05:22,289] INFO [Log partition=test1-0, dir=C:\kafka1] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 191 ms (kafka.log.Log) [2018-05-15 04:05:22,311] DEBUG Scheduling task PeriodicProducerExpirationCheck with initial delay 600000 ms and period 600000 ms. (kafka.utils.KafkaScheduler) [2018-05-15 04:05:22,312] DEBUG Loading log '__consumer_offsets-1' (kafka.log.LogManager) [2018-05-15 04:05:22,311] INFO [ProducerStateManager partition=test-0] Loading producer state from snapshot file 'C:\kafka1\test-0\00000000000000000045.snapshot' (kafka.log.ProducerStateManager) [2018-05-15 04:05:22,320] DEBUG Scheduling task PeriodicProducerExpirationCheck with initial delay 600000 ms and period 600000 ms. (kafka.utils.KafkaScheduler) [2018-05-15 04:05:22,321] DEBUG Loading log '__consumer_offsets-10' (kafka.log.LogManager) [2018-05-15 04:05:22,321] INFO [Log partition=test-0, dir=C:\kafka1] Completed load of log with 2 segments, log start offset 0 and log end offset 45 in 222 ms (kafka.log.Log) [2018-05-15 04:05:22,325] DEBUG Scheduling task PeriodicProducerExpirationCheck with initial delay 600000 ms and period 600000 ms. (kafka.utils.KafkaScheduler) [2018-05-15 04:05:22,328] DEBUG Loading log '__consumer_offsets-11' (kafka.log.LogManager) (&*(&*(&)(&()*&)(Log Segment Opened by.. (&*(&*(&)(&()*&)(Log Segment Opened by.. java.lang.Thread.getStackTrace(Thread.java:1559) java.lang.Thread.getStackTrace(Thread.java:1559) kafka.log.LogSegment$.open(LogSegment.scala:560) kafka.log.LogSegment$.open(LogSegment.scala:560) kafka.log.Log$$anonfun$loadSegmentFiles$3.apply(Log.scala:333) kafka.log.Log$$anonfun$loadSegmentFiles$3.apply(Log.scala:333) kafka.log.Log$$anonfun$loadSegmentFiles$3.apply(Log.scala:320) kafka.log.Log$$anonfun$loadSegmentFiles$3.apply(Log.scala:320) scala.collection.TraversableLike$WithFilter$$anonfun$foreach$1.apply(TraversableLike.scala:733) scala.collection.TraversableLike$WithFilter$$anonfun$foreach$1.apply(TraversableLike.scala:733) scala.collection.IndexedSeqOptimized$class.foreach(IndexedSeqOptimized.scala:33) scala.collection.IndexedSeqOptimized$class.foreach(IndexedSeqOptimized.scala:33) scala.collection.mutable.ArrayOps$ofRef.foreach(ArrayOps.scala:186) scala.collection.mutable.ArrayOps$ofRef.foreach(ArrayOps.scala:186) scala.collection.TraversableLike$WithFilter.foreach(TraversableLike.scala:732) scala.collection.TraversableLike$WithFilter.foreach(TraversableLike.scala:732) kafka.log.Log.loadSegmentFiles(Log.scala:320) kafka.log.Log.loadSegmentFiles(Log.scala:320) kafka.log.Log.loadSegments(Log.scala:403) kafka.log.Log.loadSegments(Log.scala:403) kafka.log.Log.(Log.scala:216) kafka.log.Log.(Log.scala:216) kafka.log.Log$.apply(Log.scala:1747) kafka.log.Log$.apply(Log.scala:1747) kafka.log.LogManager.kafka$log$LogManager$$loadLog(LogManager.scala:255) kafka.log.LogManager$$anonfun$loadLogs$2$$anonfun$11$$anonfun$apply$15$$anonfun$apply$2.apply$mcV$sp(LogManager.scala:335) kafka.log.LogManager.kafka$log$LogManager$$loadLog(LogManager.scala:255) kafka.utils.CoreUtils$$anon$1.run(CoreUtils.scala:62) kafka.log.LogManager$$anonfun$loadLogs$2$$anonfun$11$$anonfun$apply$15$$anonfun$apply$2.apply$mcV$sp(LogManager.scala:335) java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:511) kafka.utils.CoreUtils$$anon$1.run(CoreUtils.scala:62) java.util.concurrent.FutureTask.run(FutureTask.java:266) java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:511) java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149) java.util.concurrent.FutureTask.run(FutureTask.java:266) java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624) java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149) java.lang.Thread.run(Thread.java:748) java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624) C:\kafka1\__consumer_offsets-10 named files java.lang.Thread.run(Thread.java:748) C:\kafka1\__consumer_offsets-1 named files (&*(&*(&)(&()*&)(Log Segment Opened by.. java.lang.Thread.getStackTrace(Thread.java:1559) kafka.log.LogSegment$.open(LogSegment.scala:560) kafka.log.Log$$anonfun$loadSegmentFiles$3.apply(Log.scala:333) kafka.log.Log$$anonfun$loadSegmentFiles$3.apply(Log.scala:320) scala.collection.TraversableLike$WithFilter$$anonfun$foreach$1.apply(TraversableLike.scala:733) scala.collection.IndexedSeqOptimized$class.foreach(IndexedSeqOptimized.scala:33) scala.collection.mutable.ArrayOps$ofRef.foreach(ArrayOps.scala:186) scala.collection.TraversableLike$WithFilter.foreach(TraversableLike.scala:732) kafka.log.Log.loadSegmentFiles(Log.scala:320) kafka.log.Log.loadSegments(Log.scala:403) kafka.log.Log.(Log.scala:216) kafka.log.Log$.apply(Log.scala:1747) kafka.log.LogManager.kafka$log$LogManager$$loadLog(LogManager.scala:255) kafka.log.LogManager$$anonfun$loadLogs$2$$anonfun$11$$anonfun$apply$15$$anonfun$apply$2.apply$mcV$sp(LogManager.scala:335) kafka.utils.CoreUtils$$anon$1.run(CoreUtils.scala:62) java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:511) java.util.concurrent.FutureTask.run(FutureTask.java:266) java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149) java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624) java.lang.Thread.run(Thread.java:748) C:\kafka1\__consumer_offsets-11 named files [2018-05-15 04:05:22,361] WARN [Log partition=__consumer_offsets-1, dir=C:\kafka1] Found a corrupted index file corresponding to log file C:\kafka1\__consumer_offsets-1\00000000000000000000.log due to Corrupt index found, index file (C:\kafka1\__consumer_offsets-1\00000000000000000000.index) has non-zero size but the last offset is 0 which is no greater than the base offset 0.}, recovering segment and rebuilding index files... (kafka.log.Log) [2018-05-15 04:05:22,361] WARN [Log partition=__consumer_offsets-10, dir=C:\kafka1] Found a corrupted index file corresponding to log file C:\kafka1\__consumer_offsets-10\00000000000000000000.log due to Corrupt index found, index file (C:\kafka1\__consumer_offsets-10\00000000000000000000.index) has non-zero size but the last offset is 0 which is no greater than the base offset 0.}, recovering segment and rebuilding index files... (kafka.log.Log) [2018-05-15 04:05:22,367] WARN [Log partition=__consumer_offsets-11, dir=C:\kafka1] Found a corrupted index file corresponding to log file C:\kafka1\__consumer_offsets-11\00000000000000000000.log due to Corrupt index found, index file (C:\kafka1\__consumer_offsets-11\00000000000000000000.index) has non-zero size but the last offset is 0 which is no greater than the base offset 0.}, recovering segment and rebuilding index files... (kafka.log.Log) [2018-05-15 04:05:22,394] INFO [Log partition=__consumer_offsets-1, dir=C:\kafka1] Recovering unflushed segment 0 (kafka.log.Log) [2018-05-15 04:05:22,394] INFO [Log partition=__consumer_offsets-10, dir=C:\kafka1] Recovering unflushed segment 0 (kafka.log.Log) [2018-05-15 04:05:22,400] INFO [Log partition=__consumer_offsets-11, dir=C:\kafka1] Recovering unflushed segment 0 (kafka.log.Log) [2018-05-15 04:05:22,433] INFO [Log partition=__consumer_offsets-1, dir=C:\kafka1] Loading producer state from offset 0 with message format version 2 (kafka.log.Log) [2018-05-15 04:05:22,433] INFO [Log partition=__consumer_offsets-10, dir=C:\kafka1] Loading producer state from offset 0 with message format version 2 (kafka.log.Log) [2018-05-15 04:05:22,443] INFO [Log partition=__consumer_offsets-11, dir=C:\kafka1] Loading producer state from offset 0 with message format version 2 (kafka.log.Log) [2018-05-15 04:05:22,448] INFO [Log partition=__consumer_offsets-1, dir=C:\kafka1] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 122 ms (kafka.log.Log) [2018-05-15 04:05:22,449] INFO [Log partition=__consumer_offsets-10, dir=C:\kafka1] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 123 ms (kafka.log.Log) [2018-05-15 04:05:22,451] DEBUG Scheduling task PeriodicProducerExpirationCheck with initial delay 600000 ms and period 600000 ms. (kafka.utils.KafkaScheduler) [2018-05-15 04:05:22,452] DEBUG Scheduling task PeriodicProducerExpirationCheck with initial delay 600000 ms and period 600000 ms. (kafka.utils.KafkaScheduler) [2018-05-15 04:05:22,452] DEBUG Loading log '__consumer_offsets-12' (kafka.log.LogManager) [2018-05-15 04:05:22,452] DEBUG Loading log '__consumer_offsets-13' (kafka.log.LogManager) [2018-05-15 04:05:22,459] INFO [Log partition=__consumer_offsets-11, dir=C:\kafka1] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 127 ms (kafka.log.Log) (&*(&*(&)(&()*&)(Log Segment Opened by.. [2018-05-15 04:05:22,463] DEBUG Scheduling task PeriodicProducerExpirationCheck with initial delay 600000 ms and period 600000 ms. (kafka.utils.KafkaScheduler) (&*(&*(&)(&()*&)(Log Segment Opened by.. java.lang.Thread.getStackTrace(Thread.java:1559) java.lang.Thread.getStackTrace(Thread.java:1559) kafka.log.LogSegment$.open(LogSegment.scala:560) kafka.log.LogSegment$.open(LogSegment.scala:560) kafka.log.Log$$anonfun$loadSegmentFiles$3.apply(Log.scala:333) kafka.log.Log$$anonfun$loadSegmentFiles$3.apply(Log.scala:320) kafka.log.Log$$anonfun$loadSegmentFiles$3.apply(Log.scala:333) [2018-05-15 04:05:22,464] DEBUG Loading log '__consumer_offsets-14' (kafka.log.LogManager) scala.collection.TraversableLike$WithFilter$$anonfun$foreach$1.apply(TraversableLike.scala:733) scala.collection.IndexedSeqOptimized$class.foreach(IndexedSeqOptimized.scala:33) scala.collection.mutable.ArrayOps$ofRef.foreach(ArrayOps.scala:186) scala.collection.TraversableLike$WithFilter.foreach(TraversableLike.scala:732) kafka.log.Log.loadSegmentFiles(Log.scala:320) kafka.log.Log.loadSegments(Log.scala:403) kafka.log.Log.(Log.scala:216) kafka.log.Log$.apply(Log.scala:1747) kafka.log.LogManager.kafka$log$LogManager$$loadLog(LogManager.scala:255) kafka.log.LogManager$$anonfun$loadLogs$2$$anonfun$11$$anonfun$apply$15$$anonfun$apply$2.apply$mcV$sp(LogManager.scala:335) kafka.utils.CoreUtils$$anon$1.run(CoreUtils.scala:62) java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:511) java.util.concurrent.FutureTask.run(FutureTask.java:266) java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149) java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624) java.lang.Thread.run(Thread.java:748) kafka.log.Log$$anonfun$loadSegmentFiles$3.apply(Log.scala:320) C:\kafka1\__consumer_offsets-13 named files scala.collection.TraversableLike$WithFilter$$anonfun$foreach$1.apply(TraversableLike.scala:733) scala.collection.IndexedSeqOptimized$class.foreach(IndexedSeqOptimized.scala:33) scala.collection.mutable.ArrayOps$ofRef.foreach(ArrayOps.scala:186) scala.collection.TraversableLike$WithFilter.foreach(TraversableLike.scala:732) kafka.log.Log.loadSegmentFiles(Log.scala:320) kafka.log.Log.loadSegments(Log.scala:403) kafka.log.Log.(Log.scala:216) kafka.log.Log$.apply(Log.scala:1747) kafka.log.LogManager.kafka$log$LogManager$$loadLog(LogManager.scala:255) kafka.log.LogManager$$anonfun$loadLogs$2$$anonfun$11$$anonfun$apply$15$$anonfun$apply$2.apply$mcV$sp(LogManager.scala:335) kafka.utils.CoreUtils$$anon$1.run(CoreUtils.scala:62) java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:511) java.util.concurrent.FutureTask.run(FutureTask.java:266) java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149) java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624) java.lang.Thread.run(Thread.java:748) C:\kafka1\__consumer_offsets-12 named files (&*(&*(&)(&()*&)(Log Segment Opened by.. java.lang.Thread.getStackTrace(Thread.java:1559) kafka.log.LogSegment$.open(LogSegment.scala:560) kafka.log.Log$$anonfun$loadSegmentFiles$3.apply(Log.scala:333) kafka.log.Log$$anonfun$loadSegmentFiles$3.apply(Log.scala:320) scala.collection.TraversableLike$WithFilter$$anonfun$foreach$1.apply(TraversableLike.scala:733) scala.collection.IndexedSeqOptimized$class.foreach(IndexedSeqOptimized.scala:33) scala.collection.mutable.ArrayOps$ofRef.foreach(ArrayOps.scala:186) scala.collection.TraversableLike$WithFilter.foreach(TraversableLike.scala:732) kafka.log.Log.loadSegmentFiles(Log.scala:320) kafka.log.Log.loadSegments(Log.scala:403) kafka.log.Log.(Log.scala:216) kafka.log.Log$.apply(Log.scala:1747) kafka.log.LogManager.kafka$log$LogManager$$loadLog(LogManager.scala:255) kafka.log.LogManager$$anonfun$loadLogs$2$$anonfun$11$$anonfun$apply$15$$anonfun$apply$2.apply$mcV$sp(LogManager.scala:335) kafka.utils.CoreUtils$$anon$1.run(CoreUtils.scala:62) java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:511) java.util.concurrent.FutureTask.run(FutureTask.java:266) java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149) java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624) java.lang.Thread.run(Thread.java:748) C:\kafka1\__consumer_offsets-14 named files [2018-05-15 04:05:22,489] WARN [Log partition=__consumer_offsets-13, dir=C:\kafka1] Found a corrupted index file corresponding to log file C:\kafka1\__consumer_offsets-13\00000000000000000000.log due to Corrupt index found, index file (C:\kafka1\__consumer_offsets-13\00000000000000000000.index) has non-zero size but the last offset is 0 which is no greater than the base offset 0.}, recovering segment and rebuilding index files... (kafka.log.Log) [2018-05-15 04:05:22,490] WARN [Log partition=__consumer_offsets-12, dir=C:\kafka1] Found a corrupted index file corresponding to log file C:\kafka1\__consumer_offsets-12\00000000000000000000.log due to Corrupt index found, index file (C:\kafka1\__consumer_offsets-12\00000000000000000000.index) has non-zero size but the last offset is 0 which is no greater than the base offset 0.}, recovering segment and rebuilding index files... (kafka.log.Log) [2018-05-15 04:05:22,502] WARN [Log partition=__consumer_offsets-14, dir=C:\kafka1] Found a corrupted index file corresponding to log file C:\kafka1\__consumer_offsets-14\00000000000000000000.log due to Corrupt index found, index file (C:\kafka1\__consumer_offsets-14\00000000000000000000.index) has non-zero size but the last offset is 0 which is no greater than the base offset 0.}, recovering segment and rebuilding index files... (kafka.log.Log) [2018-05-15 04:05:22,522] INFO [Log partition=__consumer_offsets-12, dir=C:\kafka1] Recovering unflushed segment 0 (kafka.log.Log) [2018-05-15 04:05:22,522] INFO [Log partition=__consumer_offsets-13, dir=C:\kafka1] Recovering unflushed segment 0 (kafka.log.Log) [2018-05-15 04:05:22,535] INFO [Log partition=__consumer_offsets-14, dir=C:\kafka1] Recovering unflushed segment 0 (kafka.log.Log) [2018-05-15 04:05:22,563] INFO [Log partition=__consumer_offsets-13, dir=C:\kafka1] Loading producer state from offset 0 with message format version 2 (kafka.log.Log) [2018-05-15 04:05:22,563] INFO [Log partition=__consumer_offsets-12, dir=C:\kafka1] Loading producer state from offset 0 with message format version 2 (kafka.log.Log) [2018-05-15 04:05:22,578] INFO [Log partition=__consumer_offsets-13, dir=C:\kafka1] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 122 ms (kafka.log.Log) [2018-05-15 04:05:22,580] INFO [Log partition=__consumer_offsets-14, dir=C:\kafka1] Loading producer state from offset 0 with message format version 2 (kafka.log.Log) [2018-05-15 04:05:22,581] DEBUG Scheduling task PeriodicProducerExpirationCheck with initial delay 600000 ms and period 600000 ms. (kafka.utils.KafkaScheduler) [2018-05-15 04:05:22,582] DEBUG Loading log '__consumer_offsets-15' (kafka.log.LogManager) [2018-05-15 04:05:22,585] INFO [Log partition=__consumer_offsets-12, dir=C:\kafka1] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 129 ms (kafka.log.Log) [2018-05-15 04:05:22,589] DEBUG Scheduling task PeriodicProducerExpirationCheck with initial delay 600000 ms and period 600000 ms. (kafka.utils.KafkaScheduler) [2018-05-15 04:05:22,591] DEBUG Loading log '__consumer_offsets-16' (kafka.log.LogManager) (&*(&*(&)(&()*&)(Log Segment Opened by.. java.lang.Thread.getStackTrace(Thread.java:1559) kafka.log.LogSegment$.open(LogSegment.scala:560) kafka.log.Log$$anonfun$loadSegmentFiles$3.apply(Log.scala:333) kafka.log.Log$$anonfun$loadSegmentFiles$3.apply(Log.scala:320) scala.collection.TraversableLike$WithFilter$$anonfun$foreach$1.apply(TraversableLike.scala:733) scala.collection.IndexedSeqOptimized$class.foreach(IndexedSeqOptimized.scala:33) scala.collection.mutable.ArrayOps$ofRef.foreach(ArrayOps.scala:186) scala.collection.TraversableLike$WithFilter.foreach(TraversableLike.scala:732) kafka.log.Log.loadSegmentFiles(Log.scala:320) kafka.log.Log.loadSegments(Log.scala:403) kafka.log.Log.(Log.scala:216) kafka.log.Log$.apply(Log.scala:1747) kafka.log.LogManager.kafka$log$LogManager$$loadLog(LogManager.scala:255) kafka.log.LogManager$$anonfun$loadLogs$2$$anonfun$11$$anonfun$apply$15$$anonfun$apply$2.apply$mcV$sp(LogManager.scala:335) kafka.utils.CoreUtils$$anon$1.run(CoreUtils.scala:62) java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:511) java.util.concurrent.FutureTask.run(FutureTask.java:266) java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149) java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624) java.lang.Thread.run(Thread.java:748) C:\kafka1\__consumer_offsets-15 named files [2018-05-15 04:05:22,600] INFO [Log partition=__consumer_offsets-14, dir=C:\kafka1] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 132 ms (kafka.log.Log) (&*(&*(&)(&()*&)(Log Segment Opened by.. java.lang.Thread.getStackTrace(Thread.java:1559) [2018-05-15 04:05:22,604] DEBUG Scheduling task PeriodicProducerExpirationCheck with initial delay 600000 ms and period 600000 ms. (kafka.utils.KafkaScheduler) kafka.log.LogSegment$.open(LogSegment.scala:560) kafka.log.Log$$anonfun$loadSegmentFiles$3.apply(Log.scala:333) kafka.log.Log$$anonfun$loadSegmentFiles$3.apply(Log.scala:320) scala.collection.TraversableLike$WithFilter$$anonfun$foreach$1.apply(TraversableLike.scala:733) scala.collection.IndexedSeqOptimized$class.foreach(IndexedSeqOptimized.scala:33) scala.collection.mutable.ArrayOps$ofRef.foreach(ArrayOps.scala:186) scala.collection.TraversableLike$WithFilter.foreach(TraversableLike.scala:732) kafka.log.Log.loadSegmentFiles(Log.scala:320) kafka.log.Log.loadSegments(Log.scala:403) [2018-05-15 04:05:22,605] DEBUG Loading log '__consumer_offsets-17' (kafka.log.LogManager) kafka.log.Log.(Log.scala:216) kafka.log.Log$.apply(Log.scala:1747) kafka.log.LogManager.kafka$log$LogManager$$loadLog(LogManager.scala:255) kafka.log.LogManager$$anonfun$loadLogs$2$$anonfun$11$$anonfun$apply$15$$anonfun$apply$2.apply$mcV$sp(LogManager.scala:335) kafka.utils.CoreUtils$$anon$1.run(CoreUtils.scala:62) java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:511) java.util.concurrent.FutureTask.run(FutureTask.java:266) java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149) java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624) java.lang.Thread.run(Thread.java:748) C:\kafka1\__consumer_offsets-16 named files (&*(&*(&)(&()*&)(Log Segment Opened by.. java.lang.Thread.getStackTrace(Thread.java:1559) kafka.log.LogSegment$.open(LogSegment.scala:560) kafka.log.Log$$anonfun$loadSegmentFiles$3.apply(Log.scala:333) kafka.log.Log$$anonfun$loadSegmentFiles$3.apply(Log.scala:320) scala.collection.TraversableLike$WithFilter$$anonfun$foreach$1.apply(TraversableLike.scala:733) scala.collection.IndexedSeqOptimized$class.foreach(IndexedSeqOptimized.scala:33) scala.collection.mutable.ArrayOps$ofRef.foreach(ArrayOps.scala:186) scala.collection.TraversableLike$WithFilter.foreach(TraversableLike.scala:732) kafka.log.Log.loadSegmentFiles(Log.scala:320) kafka.log.Log.loadSegments(Log.scala:403) kafka.log.Log.(Log.scala:216) kafka.log.Log$.apply(Log.scala:1747) kafka.log.LogManager.kafka$log$LogManager$$loadLog(LogManager.scala:255) kafka.log.LogManager$$anonfun$loadLogs$2$$anonfun$11$$anonfun$apply$15$$anonfun$apply$2.apply$mcV$sp(LogManager.scala:335) kafka.utils.CoreUtils$$anon$1.run(CoreUtils.scala:62) java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:511) java.util.concurrent.FutureTask.run(FutureTask.java:266) java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149) java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624) java.lang.Thread.run(Thread.java:748) C:\kafka1\__consumer_offsets-17 named files [2018-05-15 04:05:22,623] WARN [Log partition=__consumer_offsets-15, dir=C:\kafka1] Found a corrupted index file corresponding to log file C:\kafka1\__consumer_offsets-15\00000000000000000000.log due to Corrupt index found, index file (C:\kafka1\__consumer_offsets-15\00000000000000000000.index) has non-zero size but the last offset is 0 which is no greater than the base offset 0.}, recovering segment and rebuilding index files... (kafka.log.Log) [2018-05-15 04:05:22,632] WARN [Log partition=__consumer_offsets-16, dir=C:\kafka1] Found a corrupted index file corresponding to log file C:\kafka1\__consumer_offsets-16\00000000000000000000.log due to Corrupt index found, index file (C:\kafka1\__consumer_offsets-16\00000000000000000000.index) has non-zero size but the last offset is 0 which is no greater than the base offset 0.}, recovering segment and rebuilding index files... (kafka.log.Log) [2018-05-15 04:05:22,642] WARN [Log partition=__consumer_offsets-17, dir=C:\kafka1] Found a corrupted index file corresponding to log file C:\kafka1\__consumer_offsets-17\00000000000000000000.log due to Corrupt index found, index file (C:\kafka1\__consumer_offsets-17\00000000000000000000.index) has non-zero size but the last offset is 0 which is no greater than the base offset 0.}, recovering segment and rebuilding index files... (kafka.log.Log) [2018-05-15 04:05:22,658] INFO [Log partition=__consumer_offsets-15, dir=C:\kafka1] Recovering unflushed segment 0 (kafka.log.Log) [2018-05-15 04:05:22,665] INFO [Log partition=__consumer_offsets-16, dir=C:\kafka1] Recovering unflushed segment 0 (kafka.log.Log) [2018-05-15 04:05:22,676] INFO [Log partition=__consumer_offsets-17, dir=C:\kafka1] Recovering unflushed segment 0 (kafka.log.Log) [2018-05-15 04:05:22,701] INFO [Log partition=__consumer_offsets-15, dir=C:\kafka1] Loading producer state from offset 0 with message format version 2 (kafka.log.Log) [2018-05-15 04:05:22,705] INFO [Log partition=__consumer_offsets-16, dir=C:\kafka1] Loading producer state from offset 0 with message format version 2 (kafka.log.Log) [2018-05-15 04:05:22,715] INFO [Log partition=__consumer_offsets-15, dir=C:\kafka1] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 129 ms (kafka.log.Log) [2018-05-15 04:05:22,716] INFO [Log partition=__consumer_offsets-17, dir=C:\kafka1] Loading producer state from offset 0 with message format version 2 (kafka.log.Log) [2018-05-15 04:05:22,719] DEBUG Scheduling task PeriodicProducerExpirationCheck with initial delay 600000 ms and period 600000 ms. (kafka.utils.KafkaScheduler) [2018-05-15 04:05:22,720] DEBUG Loading log '__consumer_offsets-18' (kafka.log.LogManager) [2018-05-15 04:05:22,721] INFO [Log partition=__consumer_offsets-16, dir=C:\kafka1] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 125 ms (kafka.log.Log) [2018-05-15 04:05:22,724] DEBUG Scheduling task PeriodicProducerExpirationCheck with initial delay 600000 ms and period 600000 ms. (kafka.utils.KafkaScheduler) [2018-05-15 04:05:22,724] DEBUG Loading log '__consumer_offsets-19' (kafka.log.LogManager) (&*(&*(&)(&()*&)(Log Segment Opened by.. java.lang.Thread.getStackTrace(Thread.java:1559) kafka.log.LogSegment$.open(LogSegment.scala:560) kafka.log.Log$$anonfun$loadSegmentFiles$3.apply(Log.scala:333) kafka.log.Log$$anonfun$loadSegmentFiles$3.apply(Log.scala:320) scala.collection.TraversableLike$WithFilter$$anonfun$foreach$1.apply(TraversableLike.scala:733) scala.collection.IndexedSeqOptimized$class.foreach(IndexedSeqOptimized.scala:33) scala.collection.mutable.ArrayOps$ofRef.foreach(ArrayOps.scala:186) scala.collection.TraversableLike$WithFilter.foreach(TraversableLike.scala:732) kafka.log.Log.loadSegmentFiles(Log.scala:320) kafka.log.Log.loadSegments(Log.scala:403) kafka.log.Log.(Log.scala:216) kafka.log.Log$.apply(Log.scala:1747) kafka.log.LogManager.kafka$log$LogManager$$loadLog(LogManager.scala:255) [2018-05-15 04:05:22,733] INFO [Log partition=__consumer_offsets-17, dir=C:\kafka1] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 124 ms (kafka.log.Log) kafka.log.LogManager$$anonfun$loadLogs$2$$anonfun$11$$anonfun$apply$15$$anonfun$apply$2.apply$mcV$sp(LogManager.scala:335) kafka.utils.CoreUtils$$anon$1.run(CoreUtils.scala:62) java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:511) java.util.concurrent.FutureTask.run(FutureTask.java:266) java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149) java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624) java.lang.Thread.run(Thread.java:748) C:\kafka1\__consumer_offsets-18 named files [2018-05-15 04:05:22,735] DEBUG Scheduling task PeriodicProducerExpirationCheck with initial delay 600000 ms and period 600000 ms. (kafka.utils.KafkaScheduler) (&*(&*(&)(&()*&)(Log Segment Opened by.. [2018-05-15 04:05:22,736] DEBUG Loading log '__consumer_offsets-2' (kafka.log.LogManager) java.lang.Thread.getStackTrace(Thread.java:1559) kafka.log.LogSegment$.open(LogSegment.scala:560) kafka.log.Log$$anonfun$loadSegmentFiles$3.apply(Log.scala:333) kafka.log.Log$$anonfun$loadSegmentFiles$3.apply(Log.scala:320) scala.collection.TraversableLike$WithFilter$$anonfun$foreach$1.apply(TraversableLike.scala:733) scala.collection.IndexedSeqOptimized$class.foreach(IndexedSeqOptimized.scala:33) scala.collection.mutable.ArrayOps$ofRef.foreach(ArrayOps.scala:186) scala.collection.TraversableLike$WithFilter.foreach(TraversableLike.scala:732) kafka.log.Log.loadSegmentFiles(Log.scala:320) kafka.log.Log.loadSegments(Log.scala:403) kafka.log.Log.(Log.scala:216) kafka.log.Log$.apply(Log.scala:1747) kafka.log.LogManager.kafka$log$LogManager$$loadLog(LogManager.scala:255) kafka.log.LogManager$$anonfun$loadLogs$2$$anonfun$11$$anonfun$apply$15$$anonfun$apply$2.apply$mcV$sp(LogManager.scala:335) kafka.utils.CoreUtils$$anon$1.run(CoreUtils.scala:62) java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:511) java.util.concurrent.FutureTask.run(FutureTask.java:266) java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149) java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624) java.lang.Thread.run(Thread.java:748) C:\kafka1\__consumer_offsets-19 named files (&*(&*(&)(&()*&)(Log Segment Opened by.. java.lang.Thread.getStackTrace(Thread.java:1559) kafka.log.LogSegment$.open(LogSegment.scala:560) kafka.log.Log$$anonfun$loadSegmentFiles$3.apply(Log.scala:333) kafka.log.Log$$anonfun$loadSegmentFiles$3.apply(Log.scala:320) scala.collection.TraversableLike$WithFilter$$anonfun$foreach$1.apply(TraversableLike.scala:733) scala.collection.IndexedSeqOptimized$class.foreach(IndexedSeqOptimized.scala:33) scala.collection.mutable.ArrayOps$ofRef.foreach(ArrayOps.scala:186) scala.collection.TraversableLike$WithFilter.foreach(TraversableLike.scala:732) kafka.log.Log.loadSegmentFiles(Log.scala:320) kafka.log.Log.loadSegments(Log.scala:403) kafka.log.Log.(Log.scala:216) kafka.log.Log$.apply(Log.scala:1747) kafka.log.LogManager.kafka$log$LogManager$$loadLog(LogManager.scala:255) kafka.log.LogManager$$anonfun$loadLogs$2$$anonfun$11$$anonfun$apply$15$$anonfun$apply$2.apply$mcV$sp(LogManager.scala:335) kafka.utils.CoreUtils$$anon$1.run(CoreUtils.scala:62) java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:511) java.util.concurrent.FutureTask.run(FutureTask.java:266) java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149) java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624) java.lang.Thread.run(Thread.java:748) C:\kafka1\__consumer_offsets-2 named files [2018-05-15 04:05:22,758] WARN [Log partition=__consumer_offsets-18, dir=C:\kafka1] Found a corrupted index file corresponding to log file C:\kafka1\__consumer_offsets-18\00000000000000000000.log due to Corrupt index found, index file (C:\kafka1\__consumer_offsets-18\00000000000000000000.index) has non-zero size but the last offset is 0 which is no greater than the base offset 0.}, recovering segment and rebuilding index files... (kafka.log.Log) [2018-05-15 04:05:22,766] WARN [Log partition=__consumer_offsets-19, dir=C:\kafka1] Found a corrupted index file corresponding to log file C:\kafka1\__consumer_offsets-19\00000000000000000000.log due to Corrupt index found, index file (C:\kafka1\__consumer_offsets-19\00000000000000000000.index) has non-zero size but the last offset is 0 which is no greater than the base offset 0.}, recovering segment and rebuilding index files... (kafka.log.Log) [2018-05-15 04:05:22,780] WARN [Log partition=__consumer_offsets-2, dir=C:\kafka1] Found a corrupted index file corresponding to log file C:\kafka1\__consumer_offsets-2\00000000000000000000.log due to Corrupt index found, index file (C:\kafka1\__consumer_offsets-2\00000000000000000000.index) has non-zero size but the last offset is 0 which is no greater than the base offset 0.}, recovering segment and rebuilding index files... (kafka.log.Log) [2018-05-15 04:05:22,795] INFO [Log partition=__consumer_offsets-18, dir=C:\kafka1] Recovering unflushed segment 0 (kafka.log.Log) [2018-05-15 04:05:22,813] INFO [Log partition=__consumer_offsets-19, dir=C:\kafka1] Recovering unflushed segment 0 (kafka.log.Log) [2018-05-15 04:05:22,814] INFO [Log partition=__consumer_offsets-2, dir=C:\kafka1] Recovering unflushed segment 0 (kafka.log.Log) [2018-05-15 04:05:22,836] INFO [Log partition=__consumer_offsets-18, dir=C:\kafka1] Loading producer state from offset 0 with message format version 2 (kafka.log.Log) [2018-05-15 04:05:22,850] INFO [Log partition=__consumer_offsets-18, dir=C:\kafka1] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 126 ms (kafka.log.Log) [2018-05-15 04:05:22,853] DEBUG Scheduling task PeriodicProducerExpirationCheck with initial delay 600000 ms and period 600000 ms. (kafka.utils.KafkaScheduler) [2018-05-15 04:05:22,853] INFO [Log partition=__consumer_offsets-19, dir=C:\kafka1] Loading producer state from offset 0 with message format version 2 (kafka.log.Log) [2018-05-15 04:05:22,854] DEBUG Loading log '__consumer_offsets-20' (kafka.log.LogManager) [2018-05-15 04:05:22,853] INFO [Log partition=__consumer_offsets-2, dir=C:\kafka1] Loading producer state from offset 0 with message format version 2 (kafka.log.Log) (&*(&*(&)(&()*&)(Log Segment Opened by.. java.lang.Thread.getStackTrace(Thread.java:1559) kafka.log.LogSegment$.open(LogSegment.scala:560) kafka.log.Log$$anonfun$loadSegmentFiles$3.apply(Log.scala:333) kafka.log.Log$$anonfun$loadSegmentFiles$3.apply(Log.scala:320) scala.collection.TraversableLike$WithFilter$$anonfun$foreach$1.apply(TraversableLike.scala:733) scala.collection.IndexedSeqOptimized$class.foreach(IndexedSeqOptimized.scala:33) scala.collection.mutable.ArrayOps$ofRef.foreach(ArrayOps.scala:186) scala.collection.TraversableLike$WithFilter.foreach(TraversableLike.scala:732) kafka.log.Log.loadSegmentFiles(Log.scala:320) kafka.log.Log.loadSegments(Log.scala:403) kafka.log.Log.(Log.scala:216) kafka.log.Log$.apply(Log.scala:1747) kafka.log.LogManager.kafka$log$LogManager$$loadLog(LogManager.scala:255) kafka.log.LogManager$$anonfun$loadLogs$2$$anonfun$11$$anonfun$apply$15$$anonfun$apply$2.apply$mcV$sp(LogManager.scala:335) kafka.utils.CoreUtils$$anon$1.run(CoreUtils.scala:62) java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:511) java.util.concurrent.FutureTask.run(FutureTask.java:266) java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149) java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624) java.lang.Thread.run(Thread.java:748) C:\kafka1\__consumer_offsets-20 named files [2018-05-15 04:05:22,867] INFO [Log partition=__consumer_offsets-19, dir=C:\kafka1] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 139 ms (kafka.log.Log) [2018-05-15 04:05:22,868] INFO [Log partition=__consumer_offsets-2, dir=C:\kafka1] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 129 ms (kafka.log.Log) [2018-05-15 04:05:22,871] DEBUG Scheduling task PeriodicProducerExpirationCheck with initial delay 600000 ms and period 600000 ms. (kafka.utils.KafkaScheduler) [2018-05-15 04:05:22,871] DEBUG Loading log '__consumer_offsets-21' (kafka.log.LogManager) [2018-05-15 04:05:22,873] DEBUG Scheduling task PeriodicProducerExpirationCheck with initial delay 600000 ms and period 600000 ms. (kafka.utils.KafkaScheduler) [2018-05-15 04:05:22,874] DEBUG Loading log '__consumer_offsets-22' (kafka.log.LogManager) (&*(&*(&)(&()*&)(Log Segment Opened by.. (&*(&*(&)(&()*&)(Log Segment Opened by.. java.lang.Thread.getStackTrace(Thread.java:1559) java.lang.Thread.getStackTrace(Thread.java:1559) kafka.log.LogSegment$.open(LogSegment.scala:560) kafka.log.LogSegment$.open(LogSegment.scala:560) kafka.log.Log$$anonfun$loadSegmentFiles$3.apply(Log.scala:333) kafka.log.Log$$anonfun$loadSegmentFiles$3.apply(Log.scala:320) kafka.log.Log$$anonfun$loadSegmentFiles$3.apply(Log.scala:333) scala.collection.TraversableLike$WithFilter$$anonfun$foreach$1.apply(TraversableLike.scala:733) scala.collection.IndexedSeqOptimized$class.foreach(IndexedSeqOptimized.scala:33) scala.collection.mutable.ArrayOps$ofRef.foreach(ArrayOps.scala:186) scala.collection.TraversableLike$WithFilter.foreach(TraversableLike.scala:732) kafka.log.Log.loadSegmentFiles(Log.scala:320) kafka.log.Log.loadSegments(Log.scala:403) kafka.log.Log.(Log.scala:216) kafka.log.Log$$anonfun$loadSegmentFiles$3.apply(Log.scala:320) kafka.log.Log$.apply(Log.scala:1747) scala.collection.TraversableLike$WithFilter$$anonfun$foreach$1.apply(TraversableLike.scala:733) kafka.log.LogManager.kafka$log$LogManager$$loadLog(LogManager.scala:255) scala.collection.IndexedSeqOptimized$class.foreach(IndexedSeqOptimized.scala:33) kafka.log.LogManager$$anonfun$loadLogs$2$$anonfun$11$$anonfun$apply$15$$anonfun$apply$2.apply$mcV$sp(LogManager.scala:335) scala.collection.mutable.ArrayOps$ofRef.foreach(ArrayOps.scala:186) kafka.utils.CoreUtils$$anon$1.run(CoreUtils.scala:62) scala.collection.TraversableLike$WithFilter.foreach(TraversableLike.scala:732) java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:511) kafka.log.Log.loadSegmentFiles(Log.scala:320) java.util.concurrent.FutureTask.run(FutureTask.java:266) kafka.log.Log.loadSegments(Log.scala:403) java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149) java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624) kafka.log.Log.(Log.scala:216) java.lang.Thread.run(Thread.java:748) kafka.log.Log$.apply(Log.scala:1747) C:\kafka1\__consumer_offsets-22 named files kafka.log.LogManager.kafka$log$LogManager$$loadLog(LogManager.scala:255) kafka.log.LogManager$$anonfun$loadLogs$2$$anonfun$11$$anonfun$apply$15$$anonfun$apply$2.apply$mcV$sp(LogManager.scala:335) kafka.utils.CoreUtils$$anon$1.run(CoreUtils.scala:62) java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:511) java.util.concurrent.FutureTask.run(FutureTask.java:266) java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149) java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624) java.lang.Thread.run(Thread.java:748) C:\kafka1\__consumer_offsets-21 named files [2018-05-15 04:05:22,900] WARN [Log partition=__consumer_offsets-20, dir=C:\kafka1] Found a corrupted index file corresponding to log file C:\kafka1\__consumer_offsets-20\00000000000000000000.log due to Corrupt index found, index file (C:\kafka1\__consumer_offsets-20\00000000000000000000.index) has non-zero size but the last offset is 0 which is no greater than the base offset 0.}, recovering segment and rebuilding index files... (kafka.log.Log) [2018-05-15 04:05:22,915] WARN [Log partition=__consumer_offsets-21, dir=C:\kafka1] Found a corrupted index file corresponding to log file C:\kafka1\__consumer_offsets-21\00000000000000000000.log due to Corrupt index found, index file (C:\kafka1\__consumer_offsets-21\00000000000000000000.index) has non-zero size but the last offset is 0 which is no greater than the base offset 0.}, recovering segment and rebuilding index files... (kafka.log.Log) [2018-05-15 04:05:22,915] WARN [Log partition=__consumer_offsets-22, dir=C:\kafka1] Found a corrupted index file corresponding to log file C:\kafka1\__consumer_offsets-22\00000000000000000000.log due to Corrupt index found, index file (C:\kafka1\__consumer_offsets-22\00000000000000000000.index) has non-zero size but the last offset is 0 which is no greater than the base offset 0.}, recovering segment and rebuilding index files... (kafka.log.Log) [2018-05-15 04:05:22,949] INFO [Log partition=__consumer_offsets-20, dir=C:\kafka1] Recovering unflushed segment 0 (kafka.log.Log) [2018-05-15 04:05:22,953] INFO [Log partition=__consumer_offsets-21, dir=C:\kafka1] Recovering unflushed segment 0 (kafka.log.Log) [2018-05-15 04:05:22,953] INFO [Log partition=__consumer_offsets-22, dir=C:\kafka1] Recovering unflushed segment 0 (kafka.log.Log) [2018-05-15 04:05:22,990] INFO [Log partition=__consumer_offsets-20, dir=C:\kafka1] Loading producer state from offset 0 with message format version 2 (kafka.log.Log) [2018-05-15 04:05:22,994] INFO [Log partition=__consumer_offsets-21, dir=C:\kafka1] Loading producer state from offset 0 with message format version 2 (kafka.log.Log) [2018-05-15 04:05:22,995] INFO [Log partition=__consumer_offsets-22, dir=C:\kafka1] Loading producer state from offset 0 with message format version 2 (kafka.log.Log) [2018-05-15 04:05:23,004] INFO [Log partition=__consumer_offsets-20, dir=C:\kafka1] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 146 ms (kafka.log.Log) [2018-05-15 04:05:23,007] DEBUG Scheduling task PeriodicProducerExpirationCheck with initial delay 600000 ms and period 600000 ms. (kafka.utils.KafkaScheduler) [2018-05-15 04:05:23,007] DEBUG Loading log '__consumer_offsets-23' (kafka.log.LogManager) [2018-05-15 04:05:23,009] INFO [Log partition=__consumer_offsets-22, dir=C:\kafka1] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 131 ms (kafka.log.Log) [2018-05-15 04:05:23,011] INFO [Log partition=__consumer_offsets-21, dir=C:\kafka1] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 136 ms (kafka.log.Log) [2018-05-15 04:05:23,012] DEBUG Scheduling task PeriodicProducerExpirationCheck with initial delay 600000 ms and period 600000 ms. (kafka.utils.KafkaScheduler) [2018-05-15 04:05:23,012] DEBUG Loading log '__consumer_offsets-24' (kafka.log.LogManager) [2018-05-15 04:05:23,013] DEBUG Scheduling task PeriodicProducerExpirationCheck with initial delay 600000 ms and period 600000 ms. (kafka.utils.KafkaScheduler) [2018-05-15 04:05:23,014] DEBUG Loading log '__consumer_offsets-25' (kafka.log.LogManager) (&*(&*(&)(&()*&)(Log Segment Opened by.. java.lang.Thread.getStackTrace(Thread.java:1559) kafka.log.LogSegment$.open(LogSegment.scala:560) kafka.log.Log$$anonfun$loadSegmentFiles$3.apply(Log.scala:333) kafka.log.Log$$anonfun$loadSegmentFiles$3.apply(Log.scala:320) scala.collection.TraversableLike$WithFilter$$anonfun$foreach$1.apply(TraversableLike.scala:733) scala.collection.IndexedSeqOptimized$class.foreach(IndexedSeqOptimized.scala:33) scala.collection.mutable.ArrayOps$ofRef.foreach(ArrayOps.scala:186) scala.collection.TraversableLike$WithFilter.foreach(TraversableLike.scala:732) kafka.log.Log.loadSegmentFiles(Log.scala:320) kafka.log.Log.loadSegments(Log.scala:403) kafka.log.Log.(Log.scala:216) kafka.log.Log$.apply(Log.scala:1747) kafka.log.LogManager.kafka$log$LogManager$$loadLog(LogManager.scala:255) kafka.log.LogManager$$anonfun$loadLogs$2$$anonfun$11$$anonfun$apply$15$$anonfun$apply$2.apply$mcV$sp(LogManager.scala:335) kafka.utils.CoreUtils$$anon$1.run(CoreUtils.scala:62) java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:511) java.util.concurrent.FutureTask.run(FutureTask.java:266) java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149) java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624) java.lang.Thread.run(Thread.java:748) C:\kafka1\__consumer_offsets-23 named files (&*(&*(&)(&()*&)(Log Segment Opened by.. java.lang.Thread.getStackTrace(Thread.java:1559) kafka.log.LogSegment$.open(LogSegment.scala:560) kafka.log.Log$$anonfun$loadSegmentFiles$3.apply(Log.scala:333) kafka.log.Log$$anonfun$loadSegmentFiles$3.apply(Log.scala:320) scala.collection.TraversableLike$WithFilter$$anonfun$foreach$1.apply(TraversableLike.scala:733) scala.collection.IndexedSeqOptimized$class.foreach(IndexedSeqOptimized.scala:33) scala.collection.mutable.ArrayOps$ofRef.foreach(ArrayOps.scala:186) scala.collection.TraversableLike$WithFilter.foreach(TraversableLike.scala:732) kafka.log.Log.loadSegmentFiles(Log.scala:320) kafka.log.Log.loadSegments(Log.scala:403) kafka.log.Log.(Log.scala:216) kafka.log.Log$.apply(Log.scala:1747) kafka.log.LogManager.kafka$log$LogManager$$loadLog(LogManager.scala:255) kafka.log.LogManager$$anonfun$loadLogs$2$$anonfun$11$$anonfun$apply$15$$anonfun$apply$2.apply$mcV$sp(LogManager.scala:335) kafka.utils.CoreUtils$$anon$1.run(CoreUtils.scala:62) java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:511) java.util.concurrent.FutureTask.run(FutureTask.java:266) java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149) java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624) java.lang.Thread.run(Thread.java:748) C:\kafka1\__consumer_offsets-24 named files (&*(&*(&)(&()*&)(Log Segment Opened by.. java.lang.Thread.getStackTrace(Thread.java:1559) kafka.log.LogSegment$.open(LogSegment.scala:560) kafka.log.Log$$anonfun$loadSegmentFiles$3.apply(Log.scala:333) kafka.log.Log$$anonfun$loadSegmentFiles$3.apply(Log.scala:320) scala.collection.TraversableLike$WithFilter$$anonfun$foreach$1.apply(TraversableLike.scala:733) scala.collection.IndexedSeqOptimized$class.foreach(IndexedSeqOptimized.scala:33) scala.collection.mutable.ArrayOps$ofRef.foreach(ArrayOps.scala:186) scala.collection.TraversableLike$WithFilter.foreach(TraversableLike.scala:732) kafka.log.Log.loadSegmentFiles(Log.scala:320) kafka.log.Log.loadSegments(Log.scala:403) kafka.log.Log.(Log.scala:216) kafka.log.Log$.apply(Log.scala:1747) kafka.log.LogManager.kafka$log$LogManager$$loadLog(LogManager.scala:255) kafka.log.LogManager$$anonfun$loadLogs$2$$anonfun$11$$anonfun$apply$15$$anonfun$apply$2.apply$mcV$sp(LogManager.scala:335) kafka.utils.CoreUtils$$anon$1.run(CoreUtils.scala:62) java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:511) java.util.concurrent.FutureTask.run(FutureTask.java:266) java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149) java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624) java.lang.Thread.run(Thread.java:748) C:\kafka1\__consumer_offsets-25 named files [2018-05-15 04:05:23,045] WARN [Log partition=__consumer_offsets-23, dir=C:\kafka1] Found a corrupted index file corresponding to log file C:\kafka1\__consumer_offsets-23\00000000000000000000.log due to Corrupt index found, index file (C:\kafka1\__consumer_offsets-23\00000000000000000000.index) has non-zero size but the last offset is 0 which is no greater than the base offset 0.}, recovering segment and rebuilding index files... (kafka.log.Log) [2018-05-15 04:05:23,051] WARN [Log partition=__consumer_offsets-24, dir=C:\kafka1] Found a corrupted index file corresponding to log file C:\kafka1\__consumer_offsets-24\00000000000000000000.log due to Corrupt index found, index file (C:\kafka1\__consumer_offsets-24\00000000000000000000.index) has non-zero size but the last offset is 0 which is no greater than the base offset 0.}, recovering segment and rebuilding index files... (kafka.log.Log) [2018-05-15 04:05:23,054] WARN [Log partition=__consumer_offsets-25, dir=C:\kafka1] Found a corrupted index file corresponding to log file C:\kafka1\__consumer_offsets-25\00000000000000000000.log due to Corrupt index found, index file (C:\kafka1\__consumer_offsets-25\00000000000000000000.index) has non-zero size but the last offset is 0 which is no greater than the base offset 0.}, recovering segment and rebuilding index files... (kafka.log.Log) [2018-05-15 04:05:23,081] INFO [Log partition=__consumer_offsets-23, dir=C:\kafka1] Recovering unflushed segment 0 (kafka.log.Log) [2018-05-15 04:05:23,084] INFO [Log partition=__consumer_offsets-24, dir=C:\kafka1] Recovering unflushed segment 0 (kafka.log.Log) [2018-05-15 04:05:23,087] INFO [Log partition=__consumer_offsets-25, dir=C:\kafka1] Recovering unflushed segment 0 (kafka.log.Log) [2018-05-15 04:05:23,121] INFO [Log partition=__consumer_offsets-23, dir=C:\kafka1] Loading producer state from offset 0 with message format version 2 (kafka.log.Log) [2018-05-15 04:05:23,124] INFO [Log partition=__consumer_offsets-24, dir=C:\kafka1] Loading producer state from offset 0 with message format version 2 (kafka.log.Log) [2018-05-15 04:05:23,127] INFO [Log partition=__consumer_offsets-25, dir=C:\kafka1] Loading producer state from offset 0 with message format version 2 (kafka.log.Log) [2018-05-15 04:05:23,135] INFO [Log partition=__consumer_offsets-23, dir=C:\kafka1] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 124 ms (kafka.log.Log) [2018-05-15 04:05:23,137] DEBUG Scheduling task PeriodicProducerExpirationCheck with initial delay 600000 ms and period 600000 ms. (kafka.utils.KafkaScheduler) [2018-05-15 04:05:23,138] DEBUG Loading log '__consumer_offsets-26' (kafka.log.LogManager) [2018-05-15 04:05:23,138] INFO [Log partition=__consumer_offsets-24, dir=C:\kafka1] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 122 ms (kafka.log.Log) [2018-05-15 04:05:23,141] DEBUG Scheduling task PeriodicProducerExpirationCheck with initial delay 600000 ms and period 600000 ms. (kafka.utils.KafkaScheduler) [2018-05-15 04:05:23,141] DEBUG Loading log '__consumer_offsets-27' (kafka.log.LogManager) [2018-05-15 04:05:23,141] INFO [Log partition=__consumer_offsets-25, dir=C:\kafka1] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 123 ms (kafka.log.Log) [2018-05-15 04:05:23,144] DEBUG Scheduling task PeriodicProducerExpirationCheck with initial delay 600000 ms and period 600000 ms. (kafka.utils.KafkaScheduler) [2018-05-15 04:05:23,144] DEBUG Loading log '__consumer_offsets-28' (kafka.log.LogManager) (&*(&*(&)(&()*&)(Log Segment Opened by.. java.lang.Thread.getStackTrace(Thread.java:1559) kafka.log.LogSegment$.open(LogSegment.scala:560) kafka.log.Log$$anonfun$loadSegmentFiles$3.apply(Log.scala:333) kafka.log.Log$$anonfun$loadSegmentFiles$3.apply(Log.scala:320) scala.collection.TraversableLike$WithFilter$$anonfun$foreach$1.apply(TraversableLike.scala:733) scala.collection.IndexedSeqOptimized$class.foreach(IndexedSeqOptimized.scala:33) scala.collection.mutable.ArrayOps$ofRef.foreach(ArrayOps.scala:186) scala.collection.TraversableLike$WithFilter.foreach(TraversableLike.scala:732) kafka.log.Log.loadSegmentFiles(Log.scala:320) kafka.log.Log.loadSegments(Log.scala:403) kafka.log.Log.(Log.scala:216) kafka.log.Log$.apply(Log.scala:1747) kafka.log.LogManager.kafka$log$LogManager$$loadLog(LogManager.scala:255) kafka.log.LogManager$$anonfun$loadLogs$2$$anonfun$11$$anonfun$apply$15$$anonfun$apply$2.apply$mcV$sp(LogManager.scala:335) kafka.utils.CoreUtils$$anon$1.run(CoreUtils.scala:62) java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:511) java.util.concurrent.FutureTask.run(FutureTask.java:266) java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149) java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624) java.lang.Thread.run(Thread.java:748) C:\kafka1\__consumer_offsets-26 named files (&*(&*(&)(&()*&)(Log Segment Opened by.. java.lang.Thread.getStackTrace(Thread.java:1559) kafka.log.LogSegment$.open(LogSegment.scala:560) kafka.log.Log$$anonfun$loadSegmentFiles$3.apply(Log.scala:333) kafka.log.Log$$anonfun$loadSegmentFiles$3.apply(Log.scala:320) scala.collection.TraversableLike$WithFilter$$anonfun$foreach$1.apply(TraversableLike.scala:733) scala.collection.IndexedSeqOptimized$class.foreach(IndexedSeqOptimized.scala:33) scala.collection.mutable.ArrayOps$ofRef.foreach(ArrayOps.scala:186) scala.collection.TraversableLike$WithFilter.foreach(TraversableLike.scala:732) kafka.log.Log.loadSegmentFiles(Log.scala:320) kafka.log.Log.loadSegments(Log.scala:403) kafka.log.Log.(Log.scala:216) kafka.log.Log$.apply(Log.scala:1747) kafka.log.LogManager.kafka$log$LogManager$$loadLog(LogManager.scala:255) kafka.log.LogManager$$anonfun$loadLogs$2$$anonfun$11$$anonfun$apply$15$$anonfun$apply$2.apply$mcV$sp(LogManager.scala:335) kafka.utils.CoreUtils$$anon$1.run(CoreUtils.scala:62) java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:511) java.util.concurrent.FutureTask.run(FutureTask.java:266) java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149) java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624) java.lang.Thread.run(Thread.java:748) C:\kafka1\__consumer_offsets-27 named files (&*(&*(&)(&()*&)(Log Segment Opened by.. java.lang.Thread.getStackTrace(Thread.java:1559) kafka.log.LogSegment$.open(LogSegment.scala:560) kafka.log.Log$$anonfun$loadSegmentFiles$3.apply(Log.scala:333) kafka.log.Log$$anonfun$loadSegmentFiles$3.apply(Log.scala:320) scala.collection.TraversableLike$WithFilter$$anonfun$foreach$1.apply(TraversableLike.scala:733) scala.collection.IndexedSeqOptimized$class.foreach(IndexedSeqOptimized.scala:33) scala.collection.mutable.ArrayOps$ofRef.foreach(ArrayOps.scala:186) scala.collection.TraversableLike$WithFilter.foreach(TraversableLike.scala:732) kafka.log.Log.loadSegmentFiles(Log.scala:320) kafka.log.Log.loadSegments(Log.scala:403) kafka.log.Log.(Log.scala:216) kafka.log.Log$.apply(Log.scala:1747) kafka.log.LogManager.kafka$log$LogManager$$loadLog(LogManager.scala:255) kafka.log.LogManager$$anonfun$loadLogs$2$$anonfun$11$$anonfun$apply$15$$anonfun$apply$2.apply$mcV$sp(LogManager.scala:335) kafka.utils.CoreUtils$$anon$1.run(CoreUtils.scala:62) java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:511) java.util.concurrent.FutureTask.run(FutureTask.java:266) java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149) java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624) java.lang.Thread.run(Thread.java:748) C:\kafka1\__consumer_offsets-28 named files [2018-05-15 04:05:23,177] WARN [Log partition=__consumer_offsets-26, dir=C:\kafka1] Found a corrupted index file corresponding to log file C:\kafka1\__consumer_offsets-26\00000000000000000000.log due to Corrupt index found, index file (C:\kafka1\__consumer_offsets-26\00000000000000000000.index) has non-zero size but the last offset is 0 which is no greater than the base offset 0.}, recovering segment and rebuilding index files... (kafka.log.Log) [2018-05-15 04:05:23,182] WARN [Log partition=__consumer_offsets-27, dir=C:\kafka1] Found a corrupted index file corresponding to log file C:\kafka1\__consumer_offsets-27\00000000000000000000.log due to Corrupt index found, index file (C:\kafka1\__consumer_offsets-27\00000000000000000000.index) has non-zero size but the last offset is 0 which is no greater than the base offset 0.}, recovering segment and rebuilding index files... (kafka.log.Log) [2018-05-15 04:05:23,187] WARN [Log partition=__consumer_offsets-28, dir=C:\kafka1] Found a corrupted index file corresponding to log file C:\kafka1\__consumer_offsets-28\00000000000000000000.log due to Corrupt index found, index file (C:\kafka1\__consumer_offsets-28\00000000000000000000.index) has non-zero size but the last offset is 0 which is no greater than the base offset 0.}, recovering segment and rebuilding index files... (kafka.log.Log) [2018-05-15 04:05:23,212] INFO [Log partition=__consumer_offsets-26, dir=C:\kafka1] Recovering unflushed segment 0 (kafka.log.Log) [2018-05-15 04:05:23,217] INFO [Log partition=__consumer_offsets-27, dir=C:\kafka1] Recovering unflushed segment 0 (kafka.log.Log) [2018-05-15 04:05:23,222] INFO [Log partition=__consumer_offsets-28, dir=C:\kafka1] Recovering unflushed segment 0 (kafka.log.Log) [2018-05-15 04:05:23,252] INFO [Log partition=__consumer_offsets-26, dir=C:\kafka1] Loading producer state from offset 0 with message format version 2 (kafka.log.Log) [2018-05-15 04:05:23,258] INFO [Log partition=__consumer_offsets-27, dir=C:\kafka1] Loading producer state from offset 0 with message format version 2 (kafka.log.Log) [2018-05-15 04:05:23,262] INFO [Log partition=__consumer_offsets-28, dir=C:\kafka1] Loading producer state from offset 0 with message format version 2 (kafka.log.Log) [2018-05-15 04:05:23,265] INFO [Log partition=__consumer_offsets-26, dir=C:\kafka1] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 123 ms (kafka.log.Log) [2018-05-15 04:05:23,268] DEBUG Scheduling task PeriodicProducerExpirationCheck with initial delay 600000 ms and period 600000 ms. (kafka.utils.KafkaScheduler) [2018-05-15 04:05:23,268] DEBUG Loading log '__consumer_offsets-29' (kafka.log.LogManager) [2018-05-15 04:05:23,274] INFO [Log partition=__consumer_offsets-27, dir=C:\kafka1] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 127 ms (kafka.log.Log) [2018-05-15 04:05:23,276] INFO [Log partition=__consumer_offsets-28, dir=C:\kafka1] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 128 ms (kafka.log.Log) [2018-05-15 04:05:23,277] DEBUG Scheduling task PeriodicProducerExpirationCheck with initial delay 600000 ms and period 600000 ms. (kafka.utils.KafkaScheduler) [2018-05-15 04:05:23,277] DEBUG Loading log '__consumer_offsets-3' (kafka.log.LogManager) [2018-05-15 04:05:23,278] DEBUG Scheduling task PeriodicProducerExpirationCheck with initial delay 600000 ms and period 600000 ms. (kafka.utils.KafkaScheduler) [2018-05-15 04:05:23,279] DEBUG Loading log '__consumer_offsets-30' (kafka.log.LogManager) (&*(&*(&)(&()*&)(Log Segment Opened by.. java.lang.Thread.getStackTrace(Thread.java:1559) kafka.log.LogSegment$.open(LogSegment.scala:560) kafka.log.Log$$anonfun$loadSegmentFiles$3.apply(Log.scala:333) kafka.log.Log$$anonfun$loadSegmentFiles$3.apply(Log.scala:320) scala.collection.TraversableLike$WithFilter$$anonfun$foreach$1.apply(TraversableLike.scala:733) scala.collection.IndexedSeqOptimized$class.foreach(IndexedSeqOptimized.scala:33) scala.collection.mutable.ArrayOps$ofRef.foreach(ArrayOps.scala:186) scala.collection.TraversableLike$WithFilter.foreach(TraversableLike.scala:732) kafka.log.Log.loadSegmentFiles(Log.scala:320) kafka.log.Log.loadSegments(Log.scala:403) kafka.log.Log.(Log.scala:216) kafka.log.Log$.apply(Log.scala:1747) kafka.log.LogManager.kafka$log$LogManager$$loadLog(LogManager.scala:255) kafka.log.LogManager$$anonfun$loadLogs$2$$anonfun$11$$anonfun$apply$15$$anonfun$apply$2.apply$mcV$sp(LogManager.scala:335) kafka.utils.CoreUtils$$anon$1.run(CoreUtils.scala:62) java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:511) java.util.concurrent.FutureTask.run(FutureTask.java:266) java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149) java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624) java.lang.Thread.run(Thread.java:748) C:\kafka1\__consumer_offsets-29 named files (&*(&*(&)(&()*&)(Log Segment Opened by.. java.lang.Thread.getStackTrace(Thread.java:1559) kafka.log.LogSegment$.open(LogSegment.scala:560) kafka.log.Log$$anonfun$loadSegmentFiles$3.apply(Log.scala:333) (&*(&*(&)(&()*&)(Log Segment Opened by.. kafka.log.Log$$anonfun$loadSegmentFiles$3.apply(Log.scala:320) java.lang.Thread.getStackTrace(Thread.java:1559) scala.collection.TraversableLike$WithFilter$$anonfun$foreach$1.apply(TraversableLike.scala:733) kafka.log.LogSegment$.open(LogSegment.scala:560) scala.collection.IndexedSeqOptimized$class.foreach(IndexedSeqOptimized.scala:33) kafka.log.Log$$anonfun$loadSegmentFiles$3.apply(Log.scala:333) scala.collection.mutable.ArrayOps$ofRef.foreach(ArrayOps.scala:186) kafka.log.Log$$anonfun$loadSegmentFiles$3.apply(Log.scala:320) scala.collection.TraversableLike$WithFilter.foreach(TraversableLike.scala:732) kafka.log.Log.loadSegmentFiles(Log.scala:320) scala.collection.TraversableLike$WithFilter$$anonfun$foreach$1.apply(TraversableLike.scala:733) kafka.log.Log.loadSegments(Log.scala:403) scala.collection.IndexedSeqOptimized$class.foreach(IndexedSeqOptimized.scala:33) kafka.log.Log.(Log.scala:216) scala.collection.mutable.ArrayOps$ofRef.foreach(ArrayOps.scala:186) scala.collection.TraversableLike$WithFilter.foreach(TraversableLike.scala:732) kafka.log.Log.loadSegmentFiles(Log.scala:320) kafka.log.Log.loadSegments(Log.scala:403) kafka.log.Log.(Log.scala:216) kafka.log.Log$.apply(Log.scala:1747) kafka.log.Log$.apply(Log.scala:1747) kafka.log.LogManager.kafka$log$LogManager$$loadLog(LogManager.scala:255) kafka.log.LogManager.kafka$log$LogManager$$loadLog(LogManager.scala:255) kafka.log.LogManager$$anonfun$loadLogs$2$$anonfun$11$$anonfun$apply$15$$anonfun$apply$2.apply$mcV$sp(LogManager.scala:335) kafka.log.LogManager$$anonfun$loadLogs$2$$anonfun$11$$anonfun$apply$15$$anonfun$apply$2.apply$mcV$sp(LogManager.scala:335) kafka.utils.CoreUtils$$anon$1.run(CoreUtils.scala:62) kafka.utils.CoreUtils$$anon$1.run(CoreUtils.scala:62) java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:511) java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:511) java.util.concurrent.FutureTask.run(FutureTask.java:266) java.util.concurrent.FutureTask.run(FutureTask.java:266) java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149) java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624) java.lang.Thread.run(Thread.java:748) java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149) C:\kafka1\__consumer_offsets-30 named files java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624) java.lang.Thread.run(Thread.java:748) C:\kafka1\__consumer_offsets-3 named files [2018-05-15 04:05:23,308] WARN [Log partition=__consumer_offsets-29, dir=C:\kafka1] Found a corrupted index file corresponding to log file C:\kafka1\__consumer_offsets-29\00000000000000000000.log due to Corrupt index found, index file (C:\kafka1\__consumer_offsets-29\00000000000000000000.index) has non-zero size but the last offset is 0 which is no greater than the base offset 0.}, recovering segment and rebuilding index files... (kafka.log.Log) [2018-05-15 04:05:23,316] WARN [Log partition=__consumer_offsets-30, dir=C:\kafka1] Found a corrupted index file corresponding to log file C:\kafka1\__consumer_offsets-30\00000000000000000000.log due to Corrupt index found, index file (C:\kafka1\__consumer_offsets-30\00000000000000000000.index) has non-zero size but the last offset is 0 which is no greater than the base offset 0.}, recovering segment and rebuilding index files... (kafka.log.Log) [2018-05-15 04:05:23,316] WARN [Log partition=__consumer_offsets-3, dir=C:\kafka1] Found a corrupted index file corresponding to log file C:\kafka1\__consumer_offsets-3\00000000000000000000.log due to Corrupt index found, index file (C:\kafka1\__consumer_offsets-3\00000000000000000000.index) has non-zero size but the last offset is 0 which is no greater than the base offset 0.}, recovering segment and rebuilding index files... (kafka.log.Log) [2018-05-15 04:05:23,343] INFO [Log partition=__consumer_offsets-29, dir=C:\kafka1] Recovering unflushed segment 0 (kafka.log.Log) [2018-05-15 04:05:23,348] INFO [Log partition=__consumer_offsets-3, dir=C:\kafka1] Recovering unflushed segment 0 (kafka.log.Log) [2018-05-15 04:05:23,348] INFO [Log partition=__consumer_offsets-30, dir=C:\kafka1] Recovering unflushed segment 0 (kafka.log.Log) [2018-05-15 04:05:23,390] INFO [Log partition=__consumer_offsets-29, dir=C:\kafka1] Loading producer state from offset 0 with message format version 2 (kafka.log.Log) [2018-05-15 04:05:23,390] INFO [Log partition=__consumer_offsets-3, dir=C:\kafka1] Loading producer state from offset 0 with message format version 2 (kafka.log.Log) [2018-05-15 04:05:23,396] INFO [Log partition=__consumer_offsets-30, dir=C:\kafka1] Loading producer state from offset 0 with message format version 2 (kafka.log.Log) [2018-05-15 04:05:23,405] INFO [Log partition=__consumer_offsets-29, dir=C:\kafka1] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 133 ms (kafka.log.Log) [2018-05-15 04:05:23,407] INFO [Log partition=__consumer_offsets-3, dir=C:\kafka1] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 126 ms (kafka.log.Log) [2018-05-15 04:05:23,408] DEBUG Scheduling task PeriodicProducerExpirationCheck with initial delay 600000 ms and period 600000 ms. (kafka.utils.KafkaScheduler) [2018-05-15 04:05:23,408] DEBUG Loading log '__consumer_offsets-31' (kafka.log.LogManager) [2018-05-15 04:05:23,409] DEBUG Scheduling task PeriodicProducerExpirationCheck with initial delay 600000 ms and period 600000 ms. (kafka.utils.KafkaScheduler) [2018-05-15 04:05:23,410] DEBUG Loading log '__consumer_offsets-32' (kafka.log.LogManager) [2018-05-15 04:05:23,411] INFO [Log partition=__consumer_offsets-30, dir=C:\kafka1] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 129 ms (kafka.log.Log) [2018-05-15 04:05:23,419] DEBUG Scheduling task PeriodicProducerExpirationCheck with initial delay 600000 ms and period 600000 ms. (kafka.utils.KafkaScheduler) [2018-05-15 04:05:23,419] DEBUG Loading log '__consumer_offsets-33' (kafka.log.LogManager) (&*(&*(&)(&()*&)(Log Segment Opened by.. java.lang.Thread.getStackTrace(Thread.java:1559) kafka.log.LogSegment$.open(LogSegment.scala:560) kafka.log.Log$$anonfun$loadSegmentFiles$3.apply(Log.scala:333) kafka.log.Log$$anonfun$loadSegmentFiles$3.apply(Log.scala:320) scala.collection.TraversableLike$WithFilter$$anonfun$foreach$1.apply(TraversableLike.scala:733) scala.collection.IndexedSeqOptimized$class.foreach(IndexedSeqOptimized.scala:33) scala.collection.mutable.ArrayOps$ofRef.foreach(ArrayOps.scala:186) scala.collection.TraversableLike$WithFilter.foreach(TraversableLike.scala:732) kafka.log.Log.loadSegmentFiles(Log.scala:320) kafka.log.Log.loadSegments(Log.scala:403) kafka.log.Log.(Log.scala:216) kafka.log.Log$.apply(Log.scala:1747) kafka.log.LogManager.kafka$log$LogManager$$loadLog(LogManager.scala:255) kafka.log.LogManager$$anonfun$loadLogs$2$$anonfun$11$$anonfun$apply$15$$anonfun$apply$2.apply$mcV$sp(LogManager.scala:335) kafka.utils.CoreUtils$$anon$1.run(CoreUtils.scala:62) java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:511) java.util.concurrent.FutureTask.run(FutureTask.java:266) java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149) java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624) java.lang.Thread.run(Thread.java:748) C:\kafka1\__consumer_offsets-31 named files (&*(&*(&)(&()*&)(Log Segment Opened by.. java.lang.Thread.getStackTrace(Thread.java:1559) kafka.log.LogSegment$.open(LogSegment.scala:560) kafka.log.Log$$anonfun$loadSegmentFiles$3.apply(Log.scala:333) kafka.log.Log$$anonfun$loadSegmentFiles$3.apply(Log.scala:320) scala.collection.TraversableLike$WithFilter$$anonfun$foreach$1.apply(TraversableLike.scala:733) scala.collection.IndexedSeqOptimized$class.foreach(IndexedSeqOptimized.scala:33) scala.collection.mutable.ArrayOps$ofRef.foreach(ArrayOps.scala:186) (&*(&*(&)(&()*&)(Log Segment Opened by.. scala.collection.TraversableLike$WithFilter.foreach(TraversableLike.scala:732) kafka.log.Log.loadSegmentFiles(Log.scala:320) java.lang.Thread.getStackTrace(Thread.java:1559) kafka.log.Log.loadSegments(Log.scala:403) kafka.log.LogSegment$.open(LogSegment.scala:560) kafka.log.Log.(Log.scala:216) kafka.log.Log$$anonfun$loadSegmentFiles$3.apply(Log.scala:333) kafka.log.Log$.apply(Log.scala:1747) kafka.log.Log$$anonfun$loadSegmentFiles$3.apply(Log.scala:320) kafka.log.LogManager.kafka$log$LogManager$$loadLog(LogManager.scala:255) scala.collection.TraversableLike$WithFilter$$anonfun$foreach$1.apply(TraversableLike.scala:733) kafka.log.LogManager$$anonfun$loadLogs$2$$anonfun$11$$anonfun$apply$15$$anonfun$apply$2.apply$mcV$sp(LogManager.scala:335) kafka.utils.CoreUtils$$anon$1.run(CoreUtils.scala:62) scala.collection.IndexedSeqOptimized$class.foreach(IndexedSeqOptimized.scala:33) java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:511) scala.collection.mutable.ArrayOps$ofRef.foreach(ArrayOps.scala:186) java.util.concurrent.FutureTask.run(FutureTask.java:266) scala.collection.TraversableLike$WithFilter.foreach(TraversableLike.scala:732) java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149) kafka.log.Log.loadSegmentFiles(Log.scala:320) java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624) java.lang.Thread.run(Thread.java:748) kafka.log.Log.loadSegments(Log.scala:403) C:\kafka1\__consumer_offsets-32 named files kafka.log.Log.(Log.scala:216) kafka.log.Log$.apply(Log.scala:1747) kafka.log.LogManager.kafka$log$LogManager$$loadLog(LogManager.scala:255) kafka.log.LogManager$$anonfun$loadLogs$2$$anonfun$11$$anonfun$apply$15$$anonfun$apply$2.apply$mcV$sp(LogManager.scala:335) kafka.utils.CoreUtils$$anon$1.run(CoreUtils.scala:62) java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:511) java.util.concurrent.FutureTask.run(FutureTask.java:266) java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149) java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624) java.lang.Thread.run(Thread.java:748) C:\kafka1\__consumer_offsets-33 named files [2018-05-15 04:05:23,454] WARN [Log partition=__consumer_offsets-31, dir=C:\kafka1] Found a corrupted index file corresponding to log file C:\kafka1\__consumer_offsets-31\00000000000000000000.log due to Corrupt index found, index file (C:\kafka1\__consumer_offsets-31\00000000000000000000.index) has non-zero size but the last offset is 0 which is no greater than the base offset 0.}, recovering segment and rebuilding index files... (kafka.log.Log) [2018-05-15 04:05:23,463] WARN [Log partition=__consumer_offsets-33, dir=C:\kafka1] Found a corrupted index file corresponding to log file C:\kafka1\__consumer_offsets-33\00000000000000000000.log due to Corrupt index found, index file (C:\kafka1\__consumer_offsets-33\00000000000000000000.index) has non-zero size but the last offset is 0 which is no greater than the base offset 0.}, recovering segment and rebuilding index files... (kafka.log.Log) [2018-05-15 04:05:23,468] WARN [Log partition=__consumer_offsets-32, dir=C:\kafka1] Found a corrupted index file corresponding to log file C:\kafka1\__consumer_offsets-32\00000000000000000000.log due to Corrupt index found, index file (C:\kafka1\__consumer_offsets-32\00000000000000000000.index) has non-zero size but the last offset is 0 which is no greater than the base offset 0.}, recovering segment and rebuilding index files... (kafka.log.Log) [2018-05-15 04:05:23,487] INFO [Log partition=__consumer_offsets-31, dir=C:\kafka1] Recovering unflushed segment 0 (kafka.log.Log) [2018-05-15 04:05:23,499] INFO [Log partition=__consumer_offsets-33, dir=C:\kafka1] Recovering unflushed segment 0 (kafka.log.Log) [2018-05-15 04:05:23,504] INFO [Log partition=__consumer_offsets-32, dir=C:\kafka1] Recovering unflushed segment 0 (kafka.log.Log) [2018-05-15 04:05:23,529] INFO [Log partition=__consumer_offsets-31, dir=C:\kafka1] Loading producer state from offset 0 with message format version 2 (kafka.log.Log) [2018-05-15 04:05:23,539] INFO [Log partition=__consumer_offsets-33, dir=C:\kafka1] Loading producer state from offset 0 with message format version 2 (kafka.log.Log) [2018-05-15 04:05:23,543] INFO [Log partition=__consumer_offsets-31, dir=C:\kafka1] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 131 ms (kafka.log.Log) [2018-05-15 04:05:23,546] DEBUG Scheduling task PeriodicProducerExpirationCheck with initial delay 600000 ms and period 600000 ms. (kafka.utils.KafkaScheduler) [2018-05-15 04:05:23,546] DEBUG Loading log '__consumer_offsets-34' (kafka.log.LogManager) [2018-05-15 04:05:23,548] INFO [Log partition=__consumer_offsets-32, dir=C:\kafka1] Loading producer state from offset 0 with message format version 2 (kafka.log.Log) [2018-05-15 04:05:23,553] INFO [Log partition=__consumer_offsets-33, dir=C:\kafka1] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 130 ms (kafka.log.Log) [2018-05-15 04:05:23,555] DEBUG Scheduling task PeriodicProducerExpirationCheck with initial delay 600000 ms and period 600000 ms. (kafka.utils.KafkaScheduler) [2018-05-15 04:05:23,556] DEBUG Loading log '__consumer_offsets-35' (kafka.log.LogManager) (&*(&*(&)(&()*&)(Log Segment Opened by.. java.lang.Thread.getStackTrace(Thread.java:1559) kafka.log.LogSegment$.open(LogSegment.scala:560) kafka.log.Log$$anonfun$loadSegmentFiles$3.apply(Log.scala:333) kafka.log.Log$$anonfun$loadSegmentFiles$3.apply(Log.scala:320) scala.collection.TraversableLike$WithFilter$$anonfun$foreach$1.apply(TraversableLike.scala:733) scala.collection.IndexedSeqOptimized$class.foreach(IndexedSeqOptimized.scala:33) scala.collection.mutable.ArrayOps$ofRef.foreach(ArrayOps.scala:186) scala.collection.TraversableLike$WithFilter.foreach(TraversableLike.scala:732) kafka.log.Log.loadSegmentFiles(Log.scala:320) kafka.log.Log.loadSegments(Log.scala:403) kafka.log.Log.(Log.scala:216) kafka.log.Log$.apply(Log.scala:1747) kafka.log.LogManager.kafka$log$LogManager$$loadLog(LogManager.scala:255) kafka.log.LogManager$$anonfun$loadLogs$2$$anonfun$11$$anonfun$apply$15$$anonfun$apply$2.apply$mcV$sp(LogManager.scala:335) kafka.utils.CoreUtils$$anon$1.run(CoreUtils.scala:62) java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:511) java.util.concurrent.FutureTask.run(FutureTask.java:266) java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149) java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624) java.lang.Thread.run(Thread.java:748) C:\kafka1\__consumer_offsets-34 named files [2018-05-15 04:05:23,564] INFO [Log partition=__consumer_offsets-32, dir=C:\kafka1] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 141 ms (kafka.log.Log) [2018-05-15 04:05:23,566] DEBUG Scheduling task PeriodicProducerExpirationCheck with initial delay 600000 ms and period 600000 ms. (kafka.utils.KafkaScheduler) [2018-05-15 04:05:23,567] DEBUG Loading log '__consumer_offsets-36' (kafka.log.LogManager) (&*(&*(&)(&()*&)(Log Segment Opened by.. java.lang.Thread.getStackTrace(Thread.java:1559) kafka.log.LogSegment$.open(LogSegment.scala:560) kafka.log.Log$$anonfun$loadSegmentFiles$3.apply(Log.scala:333) kafka.log.Log$$anonfun$loadSegmentFiles$3.apply(Log.scala:320) scala.collection.TraversableLike$WithFilter$$anonfun$foreach$1.apply(TraversableLike.scala:733) scala.collection.IndexedSeqOptimized$class.foreach(IndexedSeqOptimized.scala:33) scala.collection.mutable.ArrayOps$ofRef.foreach(ArrayOps.scala:186) scala.collection.TraversableLike$WithFilter.foreach(TraversableLike.scala:732) kafka.log.Log.loadSegmentFiles(Log.scala:320) kafka.log.Log.loadSegments(Log.scala:403) kafka.log.Log.(Log.scala:216) kafka.log.Log$.apply(Log.scala:1747) kafka.log.LogManager.kafka$log$LogManager$$loadLog(LogManager.scala:255) kafka.log.LogManager$$anonfun$loadLogs$2$$anonfun$11$$anonfun$apply$15$$anonfun$apply$2.apply$mcV$sp(LogManager.scala:335) kafka.utils.CoreUtils$$anon$1.run(CoreUtils.scala:62) java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:511) java.util.concurrent.FutureTask.run(FutureTask.java:266) java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149) java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624) java.lang.Thread.run(Thread.java:748) C:\kafka1\__consumer_offsets-35 named files (&*(&*(&)(&()*&)(Log Segment Opened by.. java.lang.Thread.getStackTrace(Thread.java:1559) kafka.log.LogSegment$.open(LogSegment.scala:560) kafka.log.Log$$anonfun$loadSegmentFiles$3.apply(Log.scala:333) kafka.log.Log$$anonfun$loadSegmentFiles$3.apply(Log.scala:320) scala.collection.TraversableLike$WithFilter$$anonfun$foreach$1.apply(TraversableLike.scala:733) scala.collection.IndexedSeqOptimized$class.foreach(IndexedSeqOptimized.scala:33) scala.collection.mutable.ArrayOps$ofRef.foreach(ArrayOps.scala:186) scala.collection.TraversableLike$WithFilter.foreach(TraversableLike.scala:732) kafka.log.Log.loadSegmentFiles(Log.scala:320) kafka.log.Log.loadSegments(Log.scala:403) kafka.log.Log.(Log.scala:216) kafka.log.Log$.apply(Log.scala:1747) kafka.log.LogManager.kafka$log$LogManager$$loadLog(LogManager.scala:255) kafka.log.LogManager$$anonfun$loadLogs$2$$anonfun$11$$anonfun$apply$15$$anonfun$apply$2.apply$mcV$sp(LogManager.scala:335) kafka.utils.CoreUtils$$anon$1.run(CoreUtils.scala:62) java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:511) java.util.concurrent.FutureTask.run(FutureTask.java:266) java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149) java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624) java.lang.Thread.run(Thread.java:748) C:\kafka1\__consumer_offsets-36 named files [2018-05-15 04:05:23,582] WARN [Log partition=__consumer_offsets-34, dir=C:\kafka1] Found a corrupted index file corresponding to log file C:\kafka1\__consumer_offsets-34\00000000000000000000.log due to Corrupt index found, index file (C:\kafka1\__consumer_offsets-34\00000000000000000000.index) has non-zero size but the last offset is 0 which is no greater than the base offset 0.}, recovering segment and rebuilding index files... (kafka.log.Log) [2018-05-15 04:05:23,592] WARN [Log partition=__consumer_offsets-35, dir=C:\kafka1] Found a corrupted index file corresponding to log file C:\kafka1\__consumer_offsets-35\00000000000000000000.log due to Corrupt index found, index file (C:\kafka1\__consumer_offsets-35\00000000000000000000.index) has non-zero size but the last offset is 0 which is no greater than the base offset 0.}, recovering segment and rebuilding index files... (kafka.log.Log) [2018-05-15 04:05:23,604] WARN [Log partition=__consumer_offsets-36, dir=C:\kafka1] Found a corrupted index file corresponding to log file C:\kafka1\__consumer_offsets-36\00000000000000000000.log due to Corrupt index found, index file (C:\kafka1\__consumer_offsets-36\00000000000000000000.index) has non-zero size but the last offset is 0 which is no greater than the base offset 0.}, recovering segment and rebuilding index files... (kafka.log.Log) [2018-05-15 04:05:23,617] INFO [Log partition=__consumer_offsets-34, dir=C:\kafka1] Recovering unflushed segment 0 (kafka.log.Log) [2018-05-15 04:05:23,628] INFO [Log partition=__consumer_offsets-35, dir=C:\kafka1] Recovering unflushed segment 0 (kafka.log.Log) [2018-05-15 04:05:23,640] INFO [Log partition=__consumer_offsets-36, dir=C:\kafka1] Recovering unflushed segment 0 (kafka.log.Log) [2018-05-15 04:05:23,660] INFO [Log partition=__consumer_offsets-34, dir=C:\kafka1] Loading producer state from offset 0 with message format version 2 (kafka.log.Log) [2018-05-15 04:05:23,673] INFO [Log partition=__consumer_offsets-35, dir=C:\kafka1] Loading producer state from offset 0 with message format version 2 (kafka.log.Log) [2018-05-15 04:05:23,677] INFO [Log partition=__consumer_offsets-34, dir=C:\kafka1] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 127 ms (kafka.log.Log) [2018-05-15 04:05:23,679] DEBUG Scheduling task PeriodicProducerExpirationCheck with initial delay 600000 ms and period 600000 ms. (kafka.utils.KafkaScheduler) [2018-05-15 04:05:23,679] DEBUG Loading log '__consumer_offsets-37' (kafka.log.LogManager) [2018-05-15 04:05:23,685] INFO [Log partition=__consumer_offsets-36, dir=C:\kafka1] Loading producer state from offset 0 with message format version 2 (kafka.log.Log) [2018-05-15 04:05:23,688] INFO [Log partition=__consumer_offsets-35, dir=C:\kafka1] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 129 ms (kafka.log.Log) (&*(&*(&)(&()*&)(Log Segment Opened by.. java.lang.Thread.getStackTrace(Thread.java:1559) kafka.log.LogSegment$.open(LogSegment.scala:560) kafka.log.Log$$anonfun$loadSegmentFiles$3.apply(Log.scala:333) kafka.log.Log$$anonfun$loadSegmentFiles$3.apply(Log.scala:320) [2018-05-15 04:05:23,690] DEBUG Scheduling task PeriodicProducerExpirationCheck with initial delay 600000 ms and period 600000 ms. (kafka.utils.KafkaScheduler) scala.collection.TraversableLike$WithFilter$$anonfun$foreach$1.apply(TraversableLike.scala:733) scala.collection.IndexedSeqOptimized$class.foreach(IndexedSeqOptimized.scala:33) scala.collection.mutable.ArrayOps$ofRef.foreach(ArrayOps.scala:186) [2018-05-15 04:05:23,690] DEBUG Loading log '__consumer_offsets-38' (kafka.log.LogManager) scala.collection.TraversableLike$WithFilter.foreach(TraversableLike.scala:732) kafka.log.Log.loadSegmentFiles(Log.scala:320) kafka.log.Log.loadSegments(Log.scala:403) kafka.log.Log.(Log.scala:216) kafka.log.Log$.apply(Log.scala:1747) kafka.log.LogManager.kafka$log$LogManager$$loadLog(LogManager.scala:255) kafka.log.LogManager$$anonfun$loadLogs$2$$anonfun$11$$anonfun$apply$15$$anonfun$apply$2.apply$mcV$sp(LogManager.scala:335) kafka.utils.CoreUtils$$anon$1.run(CoreUtils.scala:62) java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:511) java.util.concurrent.FutureTask.run(FutureTask.java:266) java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149) java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624) java.lang.Thread.run(Thread.java:748) C:\kafka1\__consumer_offsets-37 named files [2018-05-15 04:05:23,701] INFO [Log partition=__consumer_offsets-36, dir=C:\kafka1] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 131 ms (kafka.log.Log) (&*(&*(&)(&()*&)(Log Segment Opened by.. java.lang.Thread.getStackTrace(Thread.java:1559) kafka.log.LogSegment$.open(LogSegment.scala:560) kafka.log.Log$$anonfun$loadSegmentFiles$3.apply(Log.scala:333) kafka.log.Log$$anonfun$loadSegmentFiles$3.apply(Log.scala:320) [2018-05-15 04:05:23,703] DEBUG Scheduling task PeriodicProducerExpirationCheck with initial delay 600000 ms and period 600000 ms. (kafka.utils.KafkaScheduler) scala.collection.TraversableLike$WithFilter$$anonfun$foreach$1.apply(TraversableLike.scala:733) scala.collection.IndexedSeqOptimized$class.foreach(IndexedSeqOptimized.scala:33) scala.collection.mutable.ArrayOps$ofRef.foreach(ArrayOps.scala:186) scala.collection.TraversableLike$WithFilter.foreach(TraversableLike.scala:732) [2018-05-15 04:05:23,703] DEBUG Loading log '__consumer_offsets-39' (kafka.log.LogManager) kafka.log.Log.loadSegmentFiles(Log.scala:320) kafka.log.Log.loadSegments(Log.scala:403) kafka.log.Log.(Log.scala:216) kafka.log.Log$.apply(Log.scala:1747) kafka.log.LogManager.kafka$log$LogManager$$loadLog(LogManager.scala:255) kafka.log.LogManager$$anonfun$loadLogs$2$$anonfun$11$$anonfun$apply$15$$anonfun$apply$2.apply$mcV$sp(LogManager.scala:335) kafka.utils.CoreUtils$$anon$1.run(CoreUtils.scala:62) java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:511) java.util.concurrent.FutureTask.run(FutureTask.java:266) java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149) java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624) java.lang.Thread.run(Thread.java:748) C:\kafka1\__consumer_offsets-38 named files (&*(&*(&)(&()*&)(Log Segment Opened by.. java.lang.Thread.getStackTrace(Thread.java:1559) kafka.log.LogSegment$.open(LogSegment.scala:560) kafka.log.Log$$anonfun$loadSegmentFiles$3.apply(Log.scala:333) kafka.log.Log$$anonfun$loadSegmentFiles$3.apply(Log.scala:320) scala.collection.TraversableLike$WithFilter$$anonfun$foreach$1.apply(TraversableLike.scala:733) scala.collection.IndexedSeqOptimized$class.foreach(IndexedSeqOptimized.scala:33) scala.collection.mutable.ArrayOps$ofRef.foreach(ArrayOps.scala:186) [2018-05-15 04:05:23,718] WARN [Log partition=__consumer_offsets-37, dir=C:\kafka1] Found a corrupted index file corresponding to log file C:\kafka1\__consumer_offsets-37\00000000000000000000.log due to Corrupt index found, index file (C:\kafka1\__consumer_offsets-37\00000000000000000000.index) has non-zero size but the last offset is 0 which is no greater than the base offset 0.}, recovering segment and rebuilding index files... (kafka.log.Log) scala.collection.TraversableLike$WithFilter.foreach(TraversableLike.scala:732) kafka.log.Log.loadSegmentFiles(Log.scala:320) kafka.log.Log.loadSegments(Log.scala:403) kafka.log.Log.(Log.scala:216) kafka.log.Log$.apply(Log.scala:1747) kafka.log.LogManager.kafka$log$LogManager$$loadLog(LogManager.scala:255) kafka.log.LogManager$$anonfun$loadLogs$2$$anonfun$11$$anonfun$apply$15$$anonfun$apply$2.apply$mcV$sp(LogManager.scala:335) kafka.utils.CoreUtils$$anon$1.run(CoreUtils.scala:62) java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:511) java.util.concurrent.FutureTask.run(FutureTask.java:266) java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149) java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624) java.lang.Thread.run(Thread.java:748) C:\kafka1\__consumer_offsets-39 named files [2018-05-15 04:05:23,729] WARN [Log partition=__consumer_offsets-38, dir=C:\kafka1] Found a corrupted index file corresponding to log file C:\kafka1\__consumer_offsets-38\00000000000000000000.log due to Corrupt index found, index file (C:\kafka1\__consumer_offsets-38\00000000000000000000.index) has non-zero size but the last offset is 0 which is no greater than the base offset 0.}, recovering segment and rebuilding index files... (kafka.log.Log) [2018-05-15 04:05:23,744] WARN [Log partition=__consumer_offsets-39, dir=C:\kafka1] Found a corrupted index file corresponding to log file C:\kafka1\__consumer_offsets-39\00000000000000000000.log due to Corrupt index found, index file (C:\kafka1\__consumer_offsets-39\00000000000000000000.index) has non-zero size but the last offset is 0 which is no greater than the base offset 0.}, recovering segment and rebuilding index files... (kafka.log.Log) [2018-05-15 04:05:23,753] INFO [Log partition=__consumer_offsets-37, dir=C:\kafka1] Recovering unflushed segment 0 (kafka.log.Log) [2018-05-15 04:05:23,764] INFO [Log partition=__consumer_offsets-38, dir=C:\kafka1] Recovering unflushed segment 0 (kafka.log.Log) [2018-05-15 04:05:23,779] INFO [Log partition=__consumer_offsets-39, dir=C:\kafka1] Recovering unflushed segment 0 (kafka.log.Log) [2018-05-15 04:05:23,793] INFO [Log partition=__consumer_offsets-37, dir=C:\kafka1] Loading producer state from offset 0 with message format version 2 (kafka.log.Log) [2018-05-15 04:05:23,806] INFO [Log partition=__consumer_offsets-38, dir=C:\kafka1] Loading producer state from offset 0 with message format version 2 (kafka.log.Log) [2018-05-15 04:05:23,808] INFO [Log partition=__consumer_offsets-37, dir=C:\kafka1] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 126 ms (kafka.log.Log) [2018-05-15 04:05:23,810] DEBUG Scheduling task PeriodicProducerExpirationCheck with initial delay 600000 ms and period 600000 ms. (kafka.utils.KafkaScheduler) [2018-05-15 04:05:23,810] DEBUG Loading log '__consumer_offsets-4' (kafka.log.LogManager) [2018-05-15 04:05:23,820] INFO [Log partition=__consumer_offsets-39, dir=C:\kafka1] Loading producer state from offset 0 with message format version 2 (kafka.log.Log) [2018-05-15 04:05:23,821] INFO [Log partition=__consumer_offsets-38, dir=C:\kafka1] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 127 ms (kafka.log.Log) (&*(&*(&)(&()*&)(Log Segment Opened by.. java.lang.Thread.getStackTrace(Thread.java:1559) kafka.log.LogSegment$.open(LogSegment.scala:560) kafka.log.Log$$anonfun$loadSegmentFiles$3.apply(Log.scala:333) kafka.log.Log$$anonfun$loadSegmentFiles$3.apply(Log.scala:320) scala.collection.TraversableLike$WithFilter$$anonfun$foreach$1.apply(TraversableLike.scala:733) scala.collection.IndexedSeqOptimized$class.foreach(IndexedSeqOptimized.scala:33) scala.collection.mutable.ArrayOps$ofRef.foreach(ArrayOps.scala:186) scala.collection.TraversableLike$WithFilter.foreach(TraversableLike.scala:732) [2018-05-15 04:05:23,824] DEBUG Scheduling task PeriodicProducerExpirationCheck with initial delay 600000 ms and period 600000 ms. (kafka.utils.KafkaScheduler) kafka.log.Log.loadSegmentFiles(Log.scala:320) kafka.log.Log.loadSegments(Log.scala:403) kafka.log.Log.(Log.scala:216) kafka.log.Log$.apply(Log.scala:1747) [2018-05-15 04:05:23,824] DEBUG Loading log '__consumer_offsets-40' (kafka.log.LogManager) kafka.log.LogManager.kafka$log$LogManager$$loadLog(LogManager.scala:255) kafka.log.LogManager$$anonfun$loadLogs$2$$anonfun$11$$anonfun$apply$15$$anonfun$apply$2.apply$mcV$sp(LogManager.scala:335) kafka.utils.CoreUtils$$anon$1.run(CoreUtils.scala:62) java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:511) java.util.concurrent.FutureTask.run(FutureTask.java:266) java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149) java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624) java.lang.Thread.run(Thread.java:748) C:\kafka1\__consumer_offsets-4 named files (&*(&*(&)(&()*&)(Log Segment Opened by.. java.lang.Thread.getStackTrace(Thread.java:1559) kafka.log.LogSegment$.open(LogSegment.scala:560) kafka.log.Log$$anonfun$loadSegmentFiles$3.apply(Log.scala:333) kafka.log.Log$$anonfun$loadSegmentFiles$3.apply(Log.scala:320) scala.collection.TraversableLike$WithFilter$$anonfun$foreach$1.apply(TraversableLike.scala:733) scala.collection.IndexedSeqOptimized$class.foreach(IndexedSeqOptimized.scala:33) scala.collection.mutable.ArrayOps$ofRef.foreach(ArrayOps.scala:186) scala.collection.TraversableLike$WithFilter.foreach(TraversableLike.scala:732) kafka.log.Log.loadSegmentFiles(Log.scala:320) kafka.log.Log.loadSegments(Log.scala:403) kafka.log.Log.(Log.scala:216) kafka.log.Log$.apply(Log.scala:1747) kafka.log.LogManager.kafka$log$LogManager$$loadLog(LogManager.scala:255) kafka.log.LogManager$$anonfun$loadLogs$2$$anonfun$11$$anonfun$apply$15$$anonfun$apply$2.apply$mcV$sp(LogManager.scala:335) kafka.utils.CoreUtils$$anon$1.run(CoreUtils.scala:62) java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:511) java.util.concurrent.FutureTask.run(FutureTask.java:266) java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149) [2018-05-15 04:05:23,837] INFO [Log partition=__consumer_offsets-39, dir=C:\kafka1] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 128 ms (kafka.log.Log) java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624) java.lang.Thread.run(Thread.java:748) C:\kafka1\__consumer_offsets-40 named files [2018-05-15 04:05:23,839] DEBUG Scheduling task PeriodicProducerExpirationCheck with initial delay 600000 ms and period 600000 ms. (kafka.utils.KafkaScheduler) [2018-05-15 04:05:23,840] DEBUG Loading log '__consumer_offsets-41' (kafka.log.LogManager) [2018-05-15 04:05:23,851] WARN [Log partition=__consumer_offsets-4, dir=C:\kafka1] Found a corrupted index file corresponding to log file C:\kafka1\__consumer_offsets-4\00000000000000000000.log due to Corrupt index found, index file (C:\kafka1\__consumer_offsets-4\00000000000000000000.index) has non-zero size but the last offset is 0 which is no greater than the base offset 0.}, recovering segment and rebuilding index files... (kafka.log.Log) (&*(&*(&)(&()*&)(Log Segment Opened by.. java.lang.Thread.getStackTrace(Thread.java:1559) kafka.log.LogSegment$.open(LogSegment.scala:560) kafka.log.Log$$anonfun$loadSegmentFiles$3.apply(Log.scala:333) kafka.log.Log$$anonfun$loadSegmentFiles$3.apply(Log.scala:320) scala.collection.TraversableLike$WithFilter$$anonfun$foreach$1.apply(TraversableLike.scala:733) scala.collection.IndexedSeqOptimized$class.foreach(IndexedSeqOptimized.scala:33) scala.collection.mutable.ArrayOps$ofRef.foreach(ArrayOps.scala:186) scala.collection.TraversableLike$WithFilter.foreach(TraversableLike.scala:732) kafka.log.Log.loadSegmentFiles(Log.scala:320) kafka.log.Log.loadSegments(Log.scala:403) kafka.log.Log.(Log.scala:216) kafka.log.Log$.apply(Log.scala:1747) kafka.log.LogManager.kafka$log$LogManager$$loadLog(LogManager.scala:255) kafka.log.LogManager$$anonfun$loadLogs$2$$anonfun$11$$anonfun$apply$15$$anonfun$apply$2.apply$mcV$sp(LogManager.scala:335) kafka.utils.CoreUtils$$anon$1.run(CoreUtils.scala:62) java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:511) java.util.concurrent.FutureTask.run(FutureTask.java:266) java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149) java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624) java.lang.Thread.run(Thread.java:748) C:\kafka1\__consumer_offsets-41 named files [2018-05-15 04:05:23,865] WARN [Log partition=__consumer_offsets-40, dir=C:\kafka1] Found a corrupted index file corresponding to log file C:\kafka1\__consumer_offsets-40\00000000000000000000.log due to Corrupt index found, index file (C:\kafka1\__consumer_offsets-40\00000000000000000000.index) has non-zero size but the last offset is 0 which is no greater than the base offset 0.}, recovering segment and rebuilding index files... (kafka.log.Log) [2018-05-15 04:05:23,888] WARN [Log partition=__consumer_offsets-41, dir=C:\kafka1] Found a corrupted index file corresponding to log file C:\kafka1\__consumer_offsets-41\00000000000000000000.log due to Corrupt index found, index file (C:\kafka1\__consumer_offsets-41\00000000000000000000.index) has non-zero size but the last offset is 0 which is no greater than the base offset 0.}, recovering segment and rebuilding index files... (kafka.log.Log) [2018-05-15 04:05:23,893] INFO [ProducerStateManager partition=__consumer_offsets-4] Writing producer snapshot at offset 10 (kafka.log.ProducerStateManager) [2018-05-15 04:05:23,900] INFO [Log partition=__consumer_offsets-40, dir=C:\kafka1] Recovering unflushed segment 0 (kafka.log.Log) [2018-05-15 04:05:23,900] INFO [Log partition=__consumer_offsets-4, dir=C:\kafka1] Recovering unflushed segment 0 (kafka.log.Log) [2018-05-15 04:05:23,930] INFO [Log partition=__consumer_offsets-41, dir=C:\kafka1] Recovering unflushed segment 0 (kafka.log.Log) [2018-05-15 04:05:23,944] INFO [ProducerStateManager partition=__consumer_offsets-4] Writing producer snapshot at offset 10 (kafka.log.ProducerStateManager) [2018-05-15 04:05:23,951] INFO [Log partition=__consumer_offsets-40, dir=C:\kafka1] Loading producer state from offset 0 with message format version 2 (kafka.log.Log) [2018-05-15 04:05:23,960] INFO [Log partition=__consumer_offsets-4, dir=C:\kafka1] Loading producer state from offset 10 with message format version 2 (kafka.log.Log) [2018-05-15 04:05:23,966] INFO [Log partition=__consumer_offsets-40, dir=C:\kafka1] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 138 ms (kafka.log.Log) [2018-05-15 04:05:23,968] DEBUG Scheduling task PeriodicProducerExpirationCheck with initial delay 600000 ms and period 600000 ms. (kafka.utils.KafkaScheduler) [2018-05-15 04:05:23,968] DEBUG Loading log '__consumer_offsets-42' (kafka.log.LogManager) [2018-05-15 04:05:23,972] INFO [ProducerStateManager partition=__consumer_offsets-4] Loading producer state from snapshot file 'C:\kafka1\__consumer_offsets-4\00000000000000000010.snapshot' (kafka.log.ProducerStateManager) [2018-05-15 04:05:23,973] INFO [Log partition=__consumer_offsets-4, dir=C:\kafka1] Completed load of log with 1 segments, log start offset 0 and log end offset 10 in 159 ms (kafka.log.Log) [2018-05-15 04:05:23,975] DEBUG Scheduling task PeriodicProducerExpirationCheck with initial delay 600000 ms and period 600000 ms. (kafka.utils.KafkaScheduler) [2018-05-15 04:05:23,975] DEBUG Loading log '__consumer_offsets-43' (kafka.log.LogManager) (&*(&*(&)(&()*&)(Log Segment Opened by.. java.lang.Thread.getStackTrace(Thread.java:1559) [2018-05-15 04:05:23,979] INFO [Log partition=__consumer_offsets-41, dir=C:\kafka1] Loading producer state from offset 0 with message format version 2 (kafka.log.Log) kafka.log.LogSegment$.open(LogSegment.scala:560) kafka.log.Log$$anonfun$loadSegmentFiles$3.apply(Log.scala:333) kafka.log.Log$$anonfun$loadSegmentFiles$3.apply(Log.scala:320) scala.collection.TraversableLike$WithFilter$$anonfun$foreach$1.apply(TraversableLike.scala:733) scala.collection.IndexedSeqOptimized$class.foreach(IndexedSeqOptimized.scala:33) scala.collection.mutable.ArrayOps$ofRef.foreach(ArrayOps.scala:186) scala.collection.TraversableLike$WithFilter.foreach(TraversableLike.scala:732) kafka.log.Log.loadSegmentFiles(Log.scala:320) kafka.log.Log.loadSegments(Log.scala:403) kafka.log.Log.(Log.scala:216) kafka.log.Log$.apply(Log.scala:1747) kafka.log.LogManager.kafka$log$LogManager$$loadLog(LogManager.scala:255) kafka.log.LogManager$$anonfun$loadLogs$2$$anonfun$11$$anonfun$apply$15$$anonfun$apply$2.apply$mcV$sp(LogManager.scala:335) kafka.utils.CoreUtils$$anon$1.run(CoreUtils.scala:62) java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:511) java.util.concurrent.FutureTask.run(FutureTask.java:266) java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149) java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624) java.lang.Thread.run(Thread.java:748) C:\kafka1\__consumer_offsets-42 named files (&*(&*(&)(&()*&)(Log Segment Opened by.. java.lang.Thread.getStackTrace(Thread.java:1559) kafka.log.LogSegment$.open(LogSegment.scala:560) kafka.log.Log$$anonfun$loadSegmentFiles$3.apply(Log.scala:333) kafka.log.Log$$anonfun$loadSegmentFiles$3.apply(Log.scala:320) scala.collection.TraversableLike$WithFilter$$anonfun$foreach$1.apply(TraversableLike.scala:733) scala.collection.IndexedSeqOptimized$class.foreach(IndexedSeqOptimized.scala:33) scala.collection.mutable.ArrayOps$ofRef.foreach(ArrayOps.scala:186) scala.collection.TraversableLike$WithFilter.foreach(TraversableLike.scala:732) kafka.log.Log.loadSegmentFiles(Log.scala:320) kafka.log.Log.loadSegments(Log.scala:403) kafka.log.Log.(Log.scala:216) kafka.log.Log$.apply(Log.scala:1747) kafka.log.LogManager.kafka$log$LogManager$$loadLog(LogManager.scala:255) kafka.log.LogManager$$anonfun$loadLogs$2$$anonfun$11$$anonfun$apply$15$$anonfun$apply$2.apply$mcV$sp(LogManager.scala:335) kafka.utils.CoreUtils$$anon$1.run(CoreUtils.scala:62) java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:511) java.util.concurrent.FutureTask.run(FutureTask.java:266) java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149) java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624) java.lang.Thread.run(Thread.java:748) C:\kafka1\__consumer_offsets-43 named files [2018-05-15 04:05:23,994] INFO [Log partition=__consumer_offsets-41, dir=C:\kafka1] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 148 ms (kafka.log.Log) [2018-05-15 04:05:23,998] DEBUG Scheduling task PeriodicProducerExpirationCheck with initial delay 600000 ms and period 600000 ms. (kafka.utils.KafkaScheduler) [2018-05-15 04:05:23,998] DEBUG Loading log '__consumer_offsets-44' (kafka.log.LogManager) [2018-05-15 04:05:24,005] WARN [Log partition=__consumer_offsets-42, dir=C:\kafka1] Found a corrupted index file corresponding to log file C:\kafka1\__consumer_offsets-42\00000000000000000000.log due to Corrupt index found, index file (C:\kafka1\__consumer_offsets-42\00000000000000000000.index) has non-zero size but the last offset is 0 which is no greater than the base offset 0.}, recovering segment and rebuilding index files... (kafka.log.Log) [2018-05-15 04:05:24,012] WARN [Log partition=__consumer_offsets-43, dir=C:\kafka1] Found a corrupted index file corresponding to log file C:\kafka1\__consumer_offsets-43\00000000000000000000.log due to Corrupt index found, index file (C:\kafka1\__consumer_offsets-43\00000000000000000000.index) has non-zero size but the last offset is 0 which is no greater than the base offset 0.}, recovering segment and rebuilding index files... (kafka.log.Log) (&*(&*(&)(&()*&)(Log Segment Opened by.. java.lang.Thread.getStackTrace(Thread.java:1559) kafka.log.LogSegment$.open(LogSegment.scala:560) kafka.log.Log$$anonfun$loadSegmentFiles$3.apply(Log.scala:333) kafka.log.Log$$anonfun$loadSegmentFiles$3.apply(Log.scala:320) scala.collection.TraversableLike$WithFilter$$anonfun$foreach$1.apply(TraversableLike.scala:733) scala.collection.IndexedSeqOptimized$class.foreach(IndexedSeqOptimized.scala:33) scala.collection.mutable.ArrayOps$ofRef.foreach(ArrayOps.scala:186) scala.collection.TraversableLike$WithFilter.foreach(TraversableLike.scala:732) kafka.log.Log.loadSegmentFiles(Log.scala:320) kafka.log.Log.loadSegments(Log.scala:403) kafka.log.Log.(Log.scala:216) kafka.log.Log$.apply(Log.scala:1747) kafka.log.LogManager.kafka$log$LogManager$$loadLog(LogManager.scala:255) kafka.log.LogManager$$anonfun$loadLogs$2$$anonfun$11$$anonfun$apply$15$$anonfun$apply$2.apply$mcV$sp(LogManager.scala:335) kafka.utils.CoreUtils$$anon$1.run(CoreUtils.scala:62) java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:511) java.util.concurrent.FutureTask.run(FutureTask.java:266) java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149) java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624) java.lang.Thread.run(Thread.java:748) C:\kafka1\__consumer_offsets-44 named files [2018-05-15 04:05:24,038] WARN [Log partition=__consumer_offsets-44, dir=C:\kafka1] Found a corrupted index file corresponding to log file C:\kafka1\__consumer_offsets-44\00000000000000000000.log due to Corrupt index found, index file (C:\kafka1\__consumer_offsets-44\00000000000000000000.index) has non-zero size but the last offset is 0 which is no greater than the base offset 0.}, recovering segment and rebuilding index files... (kafka.log.Log) [2018-05-15 04:05:24,040] INFO [Log partition=__consumer_offsets-42, dir=C:\kafka1] Recovering unflushed segment 0 (kafka.log.Log) [2018-05-15 04:05:24,045] INFO [Log partition=__consumer_offsets-43, dir=C:\kafka1] Recovering unflushed segment 0 (kafka.log.Log) [2018-05-15 04:05:24,071] INFO [ProducerStateManager partition=__consumer_offsets-44] Writing producer snapshot at offset 11 (kafka.log.ProducerStateManager) [2018-05-15 04:05:24,078] INFO [Log partition=__consumer_offsets-44, dir=C:\kafka1] Recovering unflushed segment 0 (kafka.log.Log) [2018-05-15 04:05:24,082] INFO [Log partition=__consumer_offsets-42, dir=C:\kafka1] Loading producer state from offset 0 with message format version 2 (kafka.log.Log) [2018-05-15 04:05:24,086] INFO [Log partition=__consumer_offsets-43, dir=C:\kafka1] Loading producer state from offset 0 with message format version 2 (kafka.log.Log) [2018-05-15 04:05:24,096] INFO [Log partition=__consumer_offsets-42, dir=C:\kafka1] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 125 ms (kafka.log.Log) [2018-05-15 04:05:24,098] DEBUG Scheduling task PeriodicProducerExpirationCheck with initial delay 600000 ms and period 600000 ms. (kafka.utils.KafkaScheduler) [2018-05-15 04:05:24,098] DEBUG Loading log '__consumer_offsets-45' (kafka.log.LogManager) [2018-05-15 04:05:24,101] INFO [Log partition=__consumer_offsets-43, dir=C:\kafka1] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 122 ms (kafka.log.Log) [2018-05-15 04:05:24,103] DEBUG Scheduling task PeriodicProducerExpirationCheck with initial delay 600000 ms and period 600000 ms. (kafka.utils.KafkaScheduler) [2018-05-15 04:05:24,103] DEBUG Loading log '__consumer_offsets-46' (kafka.log.LogManager) (&*(&*(&)(&()*&)(Log Segment Opened by.. java.lang.Thread.getStackTrace(Thread.java:1559) kafka.log.LogSegment$.open(LogSegment.scala:560) kafka.log.Log$$anonfun$loadSegmentFiles$3.apply(Log.scala:333) kafka.log.Log$$anonfun$loadSegmentFiles$3.apply(Log.scala:320) scala.collection.TraversableLike$WithFilter$$anonfun$foreach$1.apply(TraversableLike.scala:733) scala.collection.IndexedSeqOptimized$class.foreach(IndexedSeqOptimized.scala:33) scala.collection.mutable.ArrayOps$ofRef.foreach(ArrayOps.scala:186) scala.collection.TraversableLike$WithFilter.foreach(TraversableLike.scala:732) kafka.log.Log.loadSegmentFiles(Log.scala:320) kafka.log.Log.loadSegments(Log.scala:403) kafka.log.Log.(Log.scala:216) kafka.log.Log$.apply(Log.scala:1747) kafka.log.LogManager.kafka$log$LogManager$$loadLog(LogManager.scala:255) kafka.log.LogManager$$anonfun$loadLogs$2$$anonfun$11$$anonfun$apply$15$$anonfun$apply$2.apply$mcV$sp(LogManager.scala:335) kafka.utils.CoreUtils$$anon$1.run(CoreUtils.scala:62) java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:511) java.util.concurrent.FutureTask.run(FutureTask.java:266) java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149) java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624) java.lang.Thread.run(Thread.java:748) C:\kafka1\__consumer_offsets-45 named files (&*(&*(&)(&()*&)(Log Segment Opened by.. java.lang.Thread.getStackTrace(Thread.java:1559) kafka.log.LogSegment$.open(LogSegment.scala:560) kafka.log.Log$$anonfun$loadSegmentFiles$3.apply(Log.scala:333) kafka.log.Log$$anonfun$loadSegmentFiles$3.apply(Log.scala:320) scala.collection.TraversableLike$WithFilter$$anonfun$foreach$1.apply(TraversableLike.scala:733) scala.collection.IndexedSeqOptimized$class.foreach(IndexedSeqOptimized.scala:33) scala.collection.mutable.ArrayOps$ofRef.foreach(ArrayOps.scala:186) scala.collection.TraversableLike$WithFilter.foreach(TraversableLike.scala:732) kafka.log.Log.loadSegmentFiles(Log.scala:320) kafka.log.Log.loadSegments(Log.scala:403) kafka.log.Log.(Log.scala:216) kafka.log.Log$.apply(Log.scala:1747) kafka.log.LogManager.kafka$log$LogManager$$loadLog(LogManager.scala:255) kafka.log.LogManager$$anonfun$loadLogs$2$$anonfun$11$$anonfun$apply$15$$anonfun$apply$2.apply$mcV$sp(LogManager.scala:335) kafka.utils.CoreUtils$$anon$1.run(CoreUtils.scala:62) java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:511) java.util.concurrent.FutureTask.run(FutureTask.java:266) java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149) java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624) java.lang.Thread.run(Thread.java:748) C:\kafka1\__consumer_offsets-46 named files [2018-05-15 04:05:24,120] INFO [ProducerStateManager partition=__consumer_offsets-44] Writing producer snapshot at offset 11 (kafka.log.ProducerStateManager) [2018-05-15 04:05:24,142] WARN [Log partition=__consumer_offsets-46, dir=C:\kafka1] Found a corrupted index file corresponding to log file C:\kafka1\__consumer_offsets-46\00000000000000000000.log due to Corrupt index found, index file (C:\kafka1\__consumer_offsets-46\00000000000000000000.index) has non-zero size but the last offset is 0 which is no greater than the base offset 0.}, recovering segment and rebuilding index files... (kafka.log.Log) [2018-05-15 04:05:24,143] WARN [Log partition=__consumer_offsets-45, dir=C:\kafka1] Found a corrupted index file corresponding to log file C:\kafka1\__consumer_offsets-45\00000000000000000000.log due to Corrupt index found, index file (C:\kafka1\__consumer_offsets-45\00000000000000000000.index) has non-zero size but the last offset is 0 which is no greater than the base offset 0.}, recovering segment and rebuilding index files... (kafka.log.Log) [2018-05-15 04:05:24,145] INFO [Log partition=__consumer_offsets-44, dir=C:\kafka1] Loading producer state from offset 11 with message format version 2 (kafka.log.Log) [2018-05-15 04:05:24,158] INFO [ProducerStateManager partition=__consumer_offsets-44] Loading producer state from snapshot file 'C:\kafka1\__consumer_offsets-44\00000000000000000011.snapshot' (kafka.log.ProducerStateManager) [2018-05-15 04:05:24,159] INFO [Log partition=__consumer_offsets-44, dir=C:\kafka1] Completed load of log with 1 segments, log start offset 0 and log end offset 11 in 157 ms (kafka.log.Log) [2018-05-15 04:05:24,160] DEBUG Scheduling task PeriodicProducerExpirationCheck with initial delay 600000 ms and period 600000 ms. (kafka.utils.KafkaScheduler) [2018-05-15 04:05:24,161] DEBUG Loading log '__consumer_offsets-47' (kafka.log.LogManager) (&*(&*(&)(&()*&)(Log Segment Opened by.. java.lang.Thread.getStackTrace(Thread.java:1559) kafka.log.LogSegment$.open(LogSegment.scala:560) kafka.log.Log$$anonfun$loadSegmentFiles$3.apply(Log.scala:333) kafka.log.Log$$anonfun$loadSegmentFiles$3.apply(Log.scala:320) scala.collection.TraversableLike$WithFilter$$anonfun$foreach$1.apply(TraversableLike.scala:733) scala.collection.IndexedSeqOptimized$class.foreach(IndexedSeqOptimized.scala:33) scala.collection.mutable.ArrayOps$ofRef.foreach(ArrayOps.scala:186) scala.collection.TraversableLike$WithFilter.foreach(TraversableLike.scala:732) kafka.log.Log.loadSegmentFiles(Log.scala:320) kafka.log.Log.loadSegments(Log.scala:403) kafka.log.Log.(Log.scala:216) kafka.log.Log$.apply(Log.scala:1747) kafka.log.LogManager.kafka$log$LogManager$$loadLog(LogManager.scala:255) kafka.log.LogManager$$anonfun$loadLogs$2$$anonfun$11$$anonfun$apply$15$$anonfun$apply$2.apply$mcV$sp(LogManager.scala:335) kafka.utils.CoreUtils$$anon$1.run(CoreUtils.scala:62) java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:511) java.util.concurrent.FutureTask.run(FutureTask.java:266) java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149) java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624) java.lang.Thread.run(Thread.java:748) C:\kafka1\__consumer_offsets-47 named files [2018-05-15 04:05:24,176] INFO [Log partition=__consumer_offsets-46, dir=C:\kafka1] Recovering unflushed segment 0 (kafka.log.Log) [2018-05-15 04:05:24,176] INFO [Log partition=__consumer_offsets-45, dir=C:\kafka1] Recovering unflushed segment 0 (kafka.log.Log) [2018-05-15 04:05:24,199] WARN [Log partition=__consumer_offsets-47, dir=C:\kafka1] Found a corrupted index file corresponding to log file C:\kafka1\__consumer_offsets-47\00000000000000000000.log due to Corrupt index found, index file (C:\kafka1\__consumer_offsets-47\00000000000000000000.index) has non-zero size but the last offset is 0 which is no greater than the base offset 0.}, recovering segment and rebuilding index files... (kafka.log.Log) [2018-05-15 04:05:24,217] INFO [Log partition=__consumer_offsets-46, dir=C:\kafka1] Loading producer state from offset 0 with message format version 2 (kafka.log.Log) [2018-05-15 04:05:24,221] INFO [Log partition=__consumer_offsets-45, dir=C:\kafka1] Loading producer state from offset 0 with message format version 2 (kafka.log.Log) [2018-05-15 04:05:24,232] INFO [Log partition=__consumer_offsets-46, dir=C:\kafka1] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 125 ms (kafka.log.Log) [2018-05-15 04:05:24,234] DEBUG Scheduling task PeriodicProducerExpirationCheck with initial delay 600000 ms and period 600000 ms. (kafka.utils.KafkaScheduler) [2018-05-15 04:05:24,235] DEBUG Loading log '__consumer_offsets-48' (kafka.log.LogManager) [2018-05-15 04:05:24,236] INFO [Log partition=__consumer_offsets-47, dir=C:\kafka1] Recovering unflushed segment 0 (kafka.log.Log) [2018-05-15 04:05:24,236] INFO [Log partition=__consumer_offsets-45, dir=C:\kafka1] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 134 ms (kafka.log.Log) [2018-05-15 04:05:24,237] DEBUG Scheduling task PeriodicProducerExpirationCheck with initial delay 600000 ms and period 600000 ms. (kafka.utils.KafkaScheduler) [2018-05-15 04:05:24,238] DEBUG Loading log '__consumer_offsets-49' (kafka.log.LogManager) (&*(&*(&)(&()*&)(Log Segment Opened by.. java.lang.Thread.getStackTrace(Thread.java:1559) kafka.log.LogSegment$.open(LogSegment.scala:560) kafka.log.Log$$anonfun$loadSegmentFiles$3.apply(Log.scala:333) kafka.log.Log$$anonfun$loadSegmentFiles$3.apply(Log.scala:320) scala.collection.TraversableLike$WithFilter$$anonfun$foreach$1.apply(TraversableLike.scala:733) scala.collection.IndexedSeqOptimized$class.foreach(IndexedSeqOptimized.scala:33) scala.collection.mutable.ArrayOps$ofRef.foreach(ArrayOps.scala:186) scala.collection.TraversableLike$WithFilter.foreach(TraversableLike.scala:732) kafka.log.Log.loadSegmentFiles(Log.scala:320) kafka.log.Log.loadSegments(Log.scala:403) kafka.log.Log.(Log.scala:216) kafka.log.Log$.apply(Log.scala:1747) kafka.log.LogManager.kafka$log$LogManager$$loadLog(LogManager.scala:255) kafka.log.LogManager$$anonfun$loadLogs$2$$anonfun$11$$anonfun$apply$15$$anonfun$apply$2.apply$mcV$sp(LogManager.scala:335) kafka.utils.CoreUtils$$anon$1.run(CoreUtils.scala:62) java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:511) java.util.concurrent.FutureTask.run(FutureTask.java:266) java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149) java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624) java.lang.Thread.run(Thread.java:748) C:\kafka1\__consumer_offsets-48 named files (&*(&*(&)(&()*&)(Log Segment Opened by.. java.lang.Thread.getStackTrace(Thread.java:1559) kafka.log.LogSegment$.open(LogSegment.scala:560) kafka.log.Log$$anonfun$loadSegmentFiles$3.apply(Log.scala:333) kafka.log.Log$$anonfun$loadSegmentFiles$3.apply(Log.scala:320) scala.collection.TraversableLike$WithFilter$$anonfun$foreach$1.apply(TraversableLike.scala:733) scala.collection.IndexedSeqOptimized$class.foreach(IndexedSeqOptimized.scala:33) scala.collection.mutable.ArrayOps$ofRef.foreach(ArrayOps.scala:186) scala.collection.TraversableLike$WithFilter.foreach(TraversableLike.scala:732) kafka.log.Log.loadSegmentFiles(Log.scala:320) kafka.log.Log.loadSegments(Log.scala:403) kafka.log.Log.(Log.scala:216) kafka.log.Log$.apply(Log.scala:1747) kafka.log.LogManager.kafka$log$LogManager$$loadLog(LogManager.scala:255) kafka.log.LogManager$$anonfun$loadLogs$2$$anonfun$11$$anonfun$apply$15$$anonfun$apply$2.apply$mcV$sp(LogManager.scala:335) kafka.utils.CoreUtils$$anon$1.run(CoreUtils.scala:62) java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:511) java.util.concurrent.FutureTask.run(FutureTask.java:266) java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149) java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624) java.lang.Thread.run(Thread.java:748) C:\kafka1\__consumer_offsets-49 named files [2018-05-15 04:05:24,270] WARN [Log partition=__consumer_offsets-48, dir=C:\kafka1] Found a corrupted index file corresponding to log file C:\kafka1\__consumer_offsets-48\00000000000000000000.log due to Corrupt index found, index file (C:\kafka1\__consumer_offsets-48\00000000000000000000.index) has non-zero size but the last offset is 0 which is no greater than the base offset 0.}, recovering segment and rebuilding index files... (kafka.log.Log) [2018-05-15 04:05:24,273] WARN [Log partition=__consumer_offsets-49, dir=C:\kafka1] Found a corrupted index file corresponding to log file C:\kafka1\__consumer_offsets-49\00000000000000000000.log due to Corrupt index found, index file (C:\kafka1\__consumer_offsets-49\00000000000000000000.index) has non-zero size but the last offset is 0 which is no greater than the base offset 0.}, recovering segment and rebuilding index files... (kafka.log.Log) [2018-05-15 04:05:24,274] INFO [Log partition=__consumer_offsets-47, dir=C:\kafka1] Loading producer state from offset 0 with message format version 2 (kafka.log.Log) [2018-05-15 04:05:24,289] INFO [Log partition=__consumer_offsets-47, dir=C:\kafka1] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 125 ms (kafka.log.Log) [2018-05-15 04:05:24,290] DEBUG Scheduling task PeriodicProducerExpirationCheck with initial delay 600000 ms and period 600000 ms. (kafka.utils.KafkaScheduler) [2018-05-15 04:05:24,290] DEBUG Loading log '__consumer_offsets-5' (kafka.log.LogManager) (&*(&*(&)(&()*&)(Log Segment Opened by.. java.lang.Thread.getStackTrace(Thread.java:1559) kafka.log.LogSegment$.open(LogSegment.scala:560) kafka.log.Log$$anonfun$loadSegmentFiles$3.apply(Log.scala:333) kafka.log.Log$$anonfun$loadSegmentFiles$3.apply(Log.scala:320) scala.collection.TraversableLike$WithFilter$$anonfun$foreach$1.apply(TraversableLike.scala:733) scala.collection.IndexedSeqOptimized$class.foreach(IndexedSeqOptimized.scala:33) scala.collection.mutable.ArrayOps$ofRef.foreach(ArrayOps.scala:186) scala.collection.TraversableLike$WithFilter.foreach(TraversableLike.scala:732) kafka.log.Log.loadSegmentFiles(Log.scala:320) kafka.log.Log.loadSegments(Log.scala:403) kafka.log.Log.(Log.scala:216) kafka.log.Log$.apply(Log.scala:1747) kafka.log.LogManager.kafka$log$LogManager$$loadLog(LogManager.scala:255) kafka.log.LogManager$$anonfun$loadLogs$2$$anonfun$11$$anonfun$apply$15$$anonfun$apply$2.apply$mcV$sp(LogManager.scala:335) kafka.utils.CoreUtils$$anon$1.run(CoreUtils.scala:62) java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:511) java.util.concurrent.FutureTask.run(FutureTask.java:266) java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149) java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624) java.lang.Thread.run(Thread.java:748) C:\kafka1\__consumer_offsets-5 named files [2018-05-15 04:05:24,304] INFO [Log partition=__consumer_offsets-48, dir=C:\kafka1] Recovering unflushed segment 0 (kafka.log.Log) [2018-05-15 04:05:24,309] INFO [Log partition=__consumer_offsets-49, dir=C:\kafka1] Recovering unflushed segment 0 (kafka.log.Log) [2018-05-15 04:05:24,340] WARN [Log partition=__consumer_offsets-5, dir=C:\kafka1] Found a corrupted index file corresponding to log file C:\kafka1\__consumer_offsets-5\00000000000000000000.log due to Corrupt index found, index file (C:\kafka1\__consumer_offsets-5\00000000000000000000.index) has non-zero size but the last offset is 0 which is no greater than the base offset 0.}, recovering segment and rebuilding index files... (kafka.log.Log) [2018-05-15 04:05:24,352] INFO [Log partition=__consumer_offsets-49, dir=C:\kafka1] Loading producer state from offset 0 with message format version 2 (kafka.log.Log) [2018-05-15 04:05:24,359] INFO [Log partition=__consumer_offsets-48, dir=C:\kafka1] Loading producer state from offset 0 with message format version 2 (kafka.log.Log) [2018-05-15 04:05:24,367] INFO [Log partition=__consumer_offsets-49, dir=C:\kafka1] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 126 ms (kafka.log.Log) [2018-05-15 04:05:24,369] DEBUG Scheduling task PeriodicProducerExpirationCheck with initial delay 600000 ms and period 600000 ms. (kafka.utils.KafkaScheduler) [2018-05-15 04:05:24,369] DEBUG Loading log '__consumer_offsets-6' (kafka.log.LogManager) [2018-05-15 04:05:24,374] INFO [Log partition=__consumer_offsets-48, dir=C:\kafka1] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 136 ms (kafka.log.Log) [2018-05-15 04:05:24,378] DEBUG Scheduling task PeriodicProducerExpirationCheck with initial delay 600000 ms and period 600000 ms. (kafka.utils.KafkaScheduler) [2018-05-15 04:05:24,378] DEBUG Loading log '__consumer_offsets-7' (kafka.log.LogManager) (&*(&*(&)(&()*&)(Log Segment Opened by.. java.lang.Thread.getStackTrace(Thread.java:1559) kafka.log.LogSegment$.open(LogSegment.scala:560) kafka.log.Log$$anonfun$loadSegmentFiles$3.apply(Log.scala:333) kafka.log.Log$$anonfun$loadSegmentFiles$3.apply(Log.scala:320) scala.collection.TraversableLike$WithFilter$$anonfun$foreach$1.apply(TraversableLike.scala:733) [2018-05-15 04:05:24,381] INFO [Log partition=__consumer_offsets-5, dir=C:\kafka1] Recovering unflushed segment 0 (kafka.log.Log) scala.collection.IndexedSeqOptimized$class.foreach(IndexedSeqOptimized.scala:33) scala.collection.mutable.ArrayOps$ofRef.foreach(ArrayOps.scala:186) scala.collection.TraversableLike$WithFilter.foreach(TraversableLike.scala:732) kafka.log.Log.loadSegmentFiles(Log.scala:320) kafka.log.Log.loadSegments(Log.scala:403) kafka.log.Log.(Log.scala:216) kafka.log.Log$.apply(Log.scala:1747) kafka.log.LogManager.kafka$log$LogManager$$loadLog(LogManager.scala:255) kafka.log.LogManager$$anonfun$loadLogs$2$$anonfun$11$$anonfun$apply$15$$anonfun$apply$2.apply$mcV$sp(LogManager.scala:335) kafka.utils.CoreUtils$$anon$1.run(CoreUtils.scala:62) java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:511) java.util.concurrent.FutureTask.run(FutureTask.java:266) java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149) java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624) java.lang.Thread.run(Thread.java:748) C:\kafka1\__consumer_offsets-6 named files (&*(&*(&)(&()*&)(Log Segment Opened by.. java.lang.Thread.getStackTrace(Thread.java:1559) kafka.log.LogSegment$.open(LogSegment.scala:560) kafka.log.Log$$anonfun$loadSegmentFiles$3.apply(Log.scala:333) kafka.log.Log$$anonfun$loadSegmentFiles$3.apply(Log.scala:320) scala.collection.TraversableLike$WithFilter$$anonfun$foreach$1.apply(TraversableLike.scala:733) scala.collection.IndexedSeqOptimized$class.foreach(IndexedSeqOptimized.scala:33) scala.collection.mutable.ArrayOps$ofRef.foreach(ArrayOps.scala:186) scala.collection.TraversableLike$WithFilter.foreach(TraversableLike.scala:732) kafka.log.Log.loadSegmentFiles(Log.scala:320) kafka.log.Log.loadSegments(Log.scala:403) kafka.log.Log.(Log.scala:216) kafka.log.Log$.apply(Log.scala:1747) kafka.log.LogManager.kafka$log$LogManager$$loadLog(LogManager.scala:255) kafka.log.LogManager$$anonfun$loadLogs$2$$anonfun$11$$anonfun$apply$15$$anonfun$apply$2.apply$mcV$sp(LogManager.scala:335) kafka.utils.CoreUtils$$anon$1.run(CoreUtils.scala:62) java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:511) java.util.concurrent.FutureTask.run(FutureTask.java:266) java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149) java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624) java.lang.Thread.run(Thread.java:748) C:\kafka1\__consumer_offsets-7 named files [2018-05-15 04:05:24,406] WARN [Log partition=__consumer_offsets-6, dir=C:\kafka1] Found a corrupted index file corresponding to log file C:\kafka1\__consumer_offsets-6\00000000000000000000.log due to Corrupt index found, index file (C:\kafka1\__consumer_offsets-6\00000000000000000000.index) has non-zero size but the last offset is 0 which is no greater than the base offset 0.}, recovering segment and rebuilding index files... (kafka.log.Log) [2018-05-15 04:05:24,415] WARN [Log partition=__consumer_offsets-7, dir=C:\kafka1] Found a corrupted index file corresponding to log file C:\kafka1\__consumer_offsets-7\00000000000000000000.log due to Corrupt index found, index file (C:\kafka1\__consumer_offsets-7\00000000000000000000.index) has non-zero size but the last offset is 0 which is no greater than the base offset 0.}, recovering segment and rebuilding index files... (kafka.log.Log) [2018-05-15 04:05:24,428] INFO [Log partition=__consumer_offsets-5, dir=C:\kafka1] Loading producer state from offset 0 with message format version 2 (kafka.log.Log) [2018-05-15 04:05:24,446] INFO [Log partition=__consumer_offsets-6, dir=C:\kafka1] Recovering unflushed segment 0 (kafka.log.Log) [2018-05-15 04:05:24,448] INFO [Log partition=__consumer_offsets-5, dir=C:\kafka1] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 154 ms (kafka.log.Log) [2018-05-15 04:05:24,453] DEBUG Scheduling task PeriodicProducerExpirationCheck with initial delay 600000 ms and period 600000 ms. (kafka.utils.KafkaScheduler) [2018-05-15 04:05:24,453] DEBUG Loading log '__consumer_offsets-8' (kafka.log.LogManager) [2018-05-15 04:05:24,455] INFO [Log partition=__consumer_offsets-7, dir=C:\kafka1] Recovering unflushed segment 0 (kafka.log.Log) (&*(&*(&)(&()*&)(Log Segment Opened by.. java.lang.Thread.getStackTrace(Thread.java:1559) kafka.log.LogSegment$.open(LogSegment.scala:560) kafka.log.Log$$anonfun$loadSegmentFiles$3.apply(Log.scala:333) kafka.log.Log$$anonfun$loadSegmentFiles$3.apply(Log.scala:320) scala.collection.TraversableLike$WithFilter$$anonfun$foreach$1.apply(TraversableLike.scala:733) scala.collection.IndexedSeqOptimized$class.foreach(IndexedSeqOptimized.scala:33) scala.collection.mutable.ArrayOps$ofRef.foreach(ArrayOps.scala:186) scala.collection.TraversableLike$WithFilter.foreach(TraversableLike.scala:732) kafka.log.Log.loadSegmentFiles(Log.scala:320) kafka.log.Log.loadSegments(Log.scala:403) kafka.log.Log.(Log.scala:216) kafka.log.Log$.apply(Log.scala:1747) kafka.log.LogManager.kafka$log$LogManager$$loadLog(LogManager.scala:255) kafka.log.LogManager$$anonfun$loadLogs$2$$anonfun$11$$anonfun$apply$15$$anonfun$apply$2.apply$mcV$sp(LogManager.scala:335) kafka.utils.CoreUtils$$anon$1.run(CoreUtils.scala:62) java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:511) java.util.concurrent.FutureTask.run(FutureTask.java:266) java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149) java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624) java.lang.Thread.run(Thread.java:748) C:\kafka1\__consumer_offsets-8 named files [2018-05-15 04:05:24,491] WARN [Log partition=__consumer_offsets-8, dir=C:\kafka1] Found a corrupted index file corresponding to log file C:\kafka1\__consumer_offsets-8\00000000000000000000.log due to Corrupt index found, index file (C:\kafka1\__consumer_offsets-8\00000000000000000000.index) has non-zero size but the last offset is 0 which is no greater than the base offset 0.}, recovering segment and rebuilding index files... (kafka.log.Log) [2018-05-15 04:05:24,502] INFO [Log partition=__consumer_offsets-6, dir=C:\kafka1] Loading producer state from offset 0 with message format version 2 (kafka.log.Log) [2018-05-15 04:05:24,503] INFO [Log partition=__consumer_offsets-7, dir=C:\kafka1] Loading producer state from offset 0 with message format version 2 (kafka.log.Log) [2018-05-15 04:05:24,517] INFO [Log partition=__consumer_offsets-6, dir=C:\kafka1] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 144 ms (kafka.log.Log) [2018-05-15 04:05:24,518] DEBUG Scheduling task PeriodicProducerExpirationCheck with initial delay 600000 ms and period 600000 ms. (kafka.utils.KafkaScheduler) [2018-05-15 04:05:24,519] DEBUG Loading log '__consumer_offsets-9' (kafka.log.LogManager) [2018-05-15 04:05:24,520] INFO [Log partition=__consumer_offsets-7, dir=C:\kafka1] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 137 ms (kafka.log.Log) [2018-05-15 04:05:24,521] DEBUG Scheduling task PeriodicProducerExpirationCheck with initial delay 600000 ms and period 600000 ms. (kafka.utils.KafkaScheduler) [2018-05-15 04:05:24,525] INFO [Log partition=__consumer_offsets-8, dir=C:\kafka1] Recovering unflushed segment 0 (kafka.log.Log) (&*(&*(&)(&()*&)(Log Segment Opened by.. java.lang.Thread.getStackTrace(Thread.java:1559) kafka.log.LogSegment$.open(LogSegment.scala:560) kafka.log.Log$$anonfun$loadSegmentFiles$3.apply(Log.scala:333) kafka.log.Log$$anonfun$loadSegmentFiles$3.apply(Log.scala:320) scala.collection.TraversableLike$WithFilter$$anonfun$foreach$1.apply(TraversableLike.scala:733) scala.collection.IndexedSeqOptimized$class.foreach(IndexedSeqOptimized.scala:33) scala.collection.mutable.ArrayOps$ofRef.foreach(ArrayOps.scala:186) scala.collection.TraversableLike$WithFilter.foreach(TraversableLike.scala:732) kafka.log.Log.loadSegmentFiles(Log.scala:320) kafka.log.Log.loadSegments(Log.scala:403) kafka.log.Log.(Log.scala:216) kafka.log.Log$.apply(Log.scala:1747) kafka.log.LogManager.kafka$log$LogManager$$loadLog(LogManager.scala:255) kafka.log.LogManager$$anonfun$loadLogs$2$$anonfun$11$$anonfun$apply$15$$anonfun$apply$2.apply$mcV$sp(LogManager.scala:335) kafka.utils.CoreUtils$$anon$1.run(CoreUtils.scala:62) java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:511) java.util.concurrent.FutureTask.run(FutureTask.java:266) java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149) java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624) java.lang.Thread.run(Thread.java:748) C:\kafka1\__consumer_offsets-9 named files [2018-05-15 04:05:24,558] WARN [Log partition=__consumer_offsets-9, dir=C:\kafka1] Found a corrupted index file corresponding to log file C:\kafka1\__consumer_offsets-9\00000000000000000000.log due to Corrupt index found, index file (C:\kafka1\__consumer_offsets-9\00000000000000000000.index) has non-zero size but the last offset is 0 which is no greater than the base offset 0.}, recovering segment and rebuilding index files... (kafka.log.Log) [2018-05-15 04:05:24,563] INFO [Log partition=__consumer_offsets-8, dir=C:\kafka1] Loading producer state from offset 0 with message format version 2 (kafka.log.Log) [2018-05-15 04:05:24,578] INFO [Log partition=__consumer_offsets-8, dir=C:\kafka1] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 121 ms (kafka.log.Log) [2018-05-15 04:05:24,579] DEBUG Scheduling task PeriodicProducerExpirationCheck with initial delay 600000 ms and period 600000 ms. (kafka.utils.KafkaScheduler) [2018-05-15 04:05:24,589] INFO [Log partition=__consumer_offsets-9, dir=C:\kafka1] Recovering unflushed segment 0 (kafka.log.Log) [2018-05-15 04:05:24,626] INFO [Log partition=__consumer_offsets-9, dir=C:\kafka1] Loading producer state from offset 0 with message format version 2 (kafka.log.Log) [2018-05-15 04:05:24,640] INFO [Log partition=__consumer_offsets-9, dir=C:\kafka1] Completed load of log with 1 segments, log start offset 0 and log end offset 0 in 117 ms (kafka.log.Log) [2018-05-15 04:05:24,642] DEBUG Scheduling task PeriodicProducerExpirationCheck with initial delay 600000 ms and period 600000 ms. (kafka.utils.KafkaScheduler) [2018-05-15 04:05:24,644] INFO Logs loading complete in 2599 ms. (kafka.log.LogManager) [2018-05-15 04:05:24,657] INFO Starting log cleanup with a period of 300000 ms. (kafka.log.LogManager) [2018-05-15 04:05:24,658] DEBUG Scheduling task kafka-log-retention with initial delay 30000 ms and period 300000 ms. (kafka.utils.KafkaScheduler) [2018-05-15 04:05:24,659] INFO Starting log flusher with a default period of 9223372036854775807 ms. (kafka.log.LogManager) [2018-05-15 04:05:24,660] DEBUG Scheduling task kafka-log-flusher with initial delay 30000 ms and period 9223372036854775807 ms. (kafka.utils.KafkaScheduler) [2018-05-15 04:05:24,660] DEBUG Scheduling task kafka-recovery-point-checkpoint with initial delay 30000 ms and period 60000 ms. (kafka.utils.KafkaScheduler) [2018-05-15 04:05:24,661] DEBUG Scheduling task kafka-log-start-offset-checkpoint with initial delay 30000 ms and period 60000 ms. (kafka.utils.KafkaScheduler) [2018-05-15 04:05:24,662] DEBUG Scheduling task kafka-delete-logs with initial delay 30000 ms and period -1 ms. (kafka.utils.KafkaScheduler) [2018-05-15 04:05:24,663] INFO Starting the log cleaner (kafka.log.LogCleaner) [2018-05-15 04:05:24,743] INFO [kafka-log-cleaner-thread-0]: Starting (kafka.log.LogCleaner) [2018-05-15 04:05:25,019] INFO Awaiting socket connections on 0.0.0.0:9092. (kafka.network.Acceptor) [2018-05-15 04:05:25,069] INFO [SocketServer brokerId=1] Started 1 acceptor threads (kafka.network.SocketServer) [2018-05-15 04:05:25,103] INFO [ExpirationReaper-1-Fetch]: Starting (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper) [2018-05-15 04:05:25,103] INFO [ExpirationReaper-1-DeleteRecords]: Starting (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper) [2018-05-15 04:05:25,103] INFO [ExpirationReaper-1-Produce]: Starting (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper) [2018-05-15 04:05:25,115] DEBUG Scheduling task isr-expiration with initial delay 0 ms and period 5000 ms. (kafka.utils.KafkaScheduler) [2018-05-15 04:05:25,116] DEBUG Scheduling task isr-change-propagation with initial delay 0 ms and period 2500 ms. (kafka.utils.KafkaScheduler) [2018-05-15 04:05:25,118] DEBUG Scheduling task shutdown-idle-replica-alter-log-dirs-thread with initial delay 0 ms and period 10000 ms. (kafka.utils.KafkaScheduler) [2018-05-15 04:05:25,130] INFO [LogDirFailureHandler]: Starting (kafka.server.ReplicaManager$LogDirFailureHandler) [2018-05-15 04:05:25,211] INFO Creating /brokers/ids/1 (is it secure? false) (kafka.zk.KafkaZkClient) [2018-05-15 04:05:25,220] INFO Result of znode creation at /brokers/ids/1 is: OK (kafka.zk.KafkaZkClient) [2018-05-15 04:05:25,221] INFO Registered broker 1 at path /brokers/ids/1 with addresses: ArrayBuffer(EndPoint(localhost,9092,ListenerName(PLAINTEXT),PLAINTEXT)) (kafka.zk.KafkaZkClient) [2018-05-15 04:05:25,279] INFO [ExpirationReaper-1-topic]: Starting (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper) [2018-05-15 04:05:25,283] INFO Creating /controller (is it secure? false) (kafka.zk.KafkaZkClient) [2018-05-15 04:05:25,289] INFO [ExpirationReaper-1-Rebalance]: Starting (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper) [2018-05-15 04:05:25,290] INFO [ExpirationReaper-1-Heartbeat]: Starting (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper) [2018-05-15 04:05:25,293] INFO Result of znode creation at /controller is: OK (kafka.zk.KafkaZkClient) [2018-05-15 04:05:25,330] INFO [GroupCoordinator 1]: Starting up. (kafka.coordinator.group.GroupCoordinator) [2018-05-15 04:05:25,330] DEBUG Initializing task scheduler. (kafka.utils.KafkaScheduler) [2018-05-15 04:05:25,331] DEBUG Scheduling task delete-expired-group-metadata with initial delay 0 ms and period 600000 ms. (kafka.utils.KafkaScheduler) [2018-05-15 04:05:25,332] INFO [GroupCoordinator 1]: Startup complete. (kafka.coordinator.group.GroupCoordinator) [2018-05-15 04:05:25,334] INFO [GroupMetadataManager brokerId=1] Removed 0 expired offsets in 2 milliseconds. (kafka.coordinator.group.GroupMetadataManager) [2018-05-15 04:05:25,354] INFO [ProducerId Manager 1]: Acquired new producerId block (brokerId:1,blockStartProducerId:26000,blockEndProducerId:26999) by writing to Zk with path version 27 (kafka.coordinator.transaction.ProducerIdManager) [2018-05-15 04:05:25,384] INFO [TransactionCoordinator id=1] Starting up. (kafka.coordinator.transaction.TransactionCoordinator) [2018-05-15 04:05:25,385] DEBUG Initializing task scheduler. (kafka.utils.KafkaScheduler) [2018-05-15 04:05:25,385] DEBUG Scheduling task transaction-abort with initial delay 60000 ms and period 60000 ms. (kafka.utils.KafkaScheduler) [2018-05-15 04:05:25,387] DEBUG Scheduling task transactionalId-expiration with initial delay 3600000 ms and period 3600000 ms. (kafka.utils.KafkaScheduler) [2018-05-15 04:05:25,388] INFO [TransactionCoordinator id=1] Startup complete. (kafka.coordinator.transaction.TransactionCoordinator) [2018-05-15 04:05:25,424] INFO [Transaction Marker Channel Manager 1]: Starting (kafka.coordinator.transaction.TransactionMarkerChannelManager) [2018-05-15 04:05:25,487] INFO [/config/changes-event-process-thread]: Starting (kafka.common.ZkNodeChangeNotificationListener$ChangeEventProcessThread) [2018-05-15 04:05:25,488] INFO Kafka version : 1.1.0 (org.apache.kafka.common.utils.AppInfoParser) [2018-05-15 04:05:25,488] INFO Kafka commitId : fdcf75ea326b8e07 (org.apache.kafka.common.utils.AppInfoParser) [2018-05-15 04:05:25,490] INFO [KafkaServer id=1] started (kafka.server.KafkaServer) [2018-05-15 04:05:25,655] DEBUG Initializing task scheduler. (kafka.utils.KafkaScheduler) [2018-05-15 04:05:25,656] DEBUG Scheduling task auto-leader-rebalance-task with initial delay 5000 ms and period -1000 ms. (kafka.utils.KafkaScheduler) [2018-05-15 04:05:25,693] INFO [ReplicaFetcherManager on broker 1] Removed fetcher for partitions __consumer_offsets-22,__consumer_offsets-30,__consumer_offsets-8,__consumer_offsets-21,__consumer_offsets-4,__consumer_offsets-27,__consumer_offsets-7,__consumer_offsets-9,__consumer_offsets-46,__consumer_offsets-25,__consumer_offsets-35,__consumer_offsets-41,__consumer_offsets-33,__consumer_offsets-23,__consumer_offsets-49,__consumer_offsets-47,__consumer_offsets-16,test-0,__consumer_offsets-28,__consumer_offsets-31,__consumer_offsets-36,__consumer_offsets-42,__consumer_offsets-3,__consumer_offsets-18,test1-0,__consumer_offsets-37,__consumer_offsets-15,__consumer_offsets-24,__consumer_offsets-38,__consumer_offsets-17,__consumer_offsets-48,__consumer_offsets-19,__consumer_offsets-11,__consumer_offsets-13,__consumer_offsets-2,__consumer_offsets-43,__consumer_offsets-6,__consumer_offsets-14,__consumer_offsets-20,__consumer_offsets-0,__consumer_offsets-44,__consumer_offsets-39,__consumer_offsets-12,__consumer_offsets-45,__consumer_offsets-1,__consumer_offsets-5,__consumer_offsets-26,__consumer_offsets-29,__consumer_offsets-34,__consumer_offsets-10,__consumer_offsets-32,__consumer_offsets-40 (kafka.server.ReplicaFetcherManager) [2018-05-15 04:05:25,708] INFO Replica loaded for partition __consumer_offsets-0 with initial high watermark 0 (kafka.cluster.Replica) [2018-05-15 04:05:25,712] INFO [Partition __consumer_offsets-0 broker=1] __consumer_offsets-0 starts at Leader Epoch 0 from offset 0. Previous Leader Epoch was: -1 (kafka.cluster.Partition) [2018-05-15 04:05:25,733] INFO Replica loaded for partition __consumer_offsets-29 with initial high watermark 0 (kafka.cluster.Replica) [2018-05-15 04:05:25,734] INFO [Partition __consumer_offsets-29 broker=1] __consumer_offsets-29 starts at Leader Epoch 0 from offset 0. Previous Leader Epoch was: -1 (kafka.cluster.Partition) [2018-05-15 04:05:25,739] INFO Replica loaded for partition __consumer_offsets-48 with initial high watermark 0 (kafka.cluster.Replica) [2018-05-15 04:05:25,739] INFO [Partition __consumer_offsets-48 broker=1] __consumer_offsets-48 starts at Leader Epoch 0 from offset 0. Previous Leader Epoch was: -1 (kafka.cluster.Partition) [2018-05-15 04:05:25,745] INFO Replica loaded for partition __consumer_offsets-10 with initial high watermark 0 (kafka.cluster.Replica) [2018-05-15 04:05:25,745] INFO [Partition __consumer_offsets-10 broker=1] __consumer_offsets-10 starts at Leader Epoch 0 from offset 0. Previous Leader Epoch was: -1 (kafka.cluster.Partition) [2018-05-15 04:05:25,750] INFO Replica loaded for partition __consumer_offsets-45 with initial high watermark 0 (kafka.cluster.Replica) [2018-05-15 04:05:25,750] INFO [Partition __consumer_offsets-45 broker=1] __consumer_offsets-45 starts at Leader Epoch 0 from offset 0. Previous Leader Epoch was: -1 (kafka.cluster.Partition) [2018-05-15 04:05:25,756] INFO Replica loaded for partition __consumer_offsets-26 with initial high watermark 0 (kafka.cluster.Replica) [2018-05-15 04:05:25,757] INFO [Partition __consumer_offsets-26 broker=1] __consumer_offsets-26 starts at Leader Epoch 0 from offset 0. Previous Leader Epoch was: -1 (kafka.cluster.Partition) [2018-05-15 04:05:25,762] INFO Replica loaded for partition test1-0 with initial high watermark 0 (kafka.cluster.Replica) [2018-05-15 04:05:25,762] INFO [Partition test1-0 broker=1] test1-0 starts at Leader Epoch 0 from offset 0. Previous Leader Epoch was: -1 (kafka.cluster.Partition) [2018-05-15 04:05:25,766] INFO Replica loaded for partition __consumer_offsets-7 with initial high watermark 0 (kafka.cluster.Replica) [2018-05-15 04:05:25,773] INFO [Partition __consumer_offsets-7 broker=1] __consumer_offsets-7 starts at Leader Epoch 0 from offset 0. Previous Leader Epoch was: -1 (kafka.cluster.Partition) [2018-05-15 04:05:25,782] INFO Replica loaded for partition __consumer_offsets-42 with initial high watermark 0 (kafka.cluster.Replica) [2018-05-15 04:05:25,783] INFO [Partition __consumer_offsets-42 broker=1] __consumer_offsets-42 starts at Leader Epoch 0 from offset 0. Previous Leader Epoch was: -1 (kafka.cluster.Partition) [2018-05-15 04:05:25,787] INFO Replica loaded for partition __consumer_offsets-4 with initial high watermark 10 (kafka.cluster.Replica) [2018-05-15 04:05:25,787] INFO [Partition __consumer_offsets-4 broker=1] __consumer_offsets-4 starts at Leader Epoch 0 from offset 10. Previous Leader Epoch was: -1 (kafka.cluster.Partition) [2018-05-15 04:05:25,798] INFO Replica loaded for partition __consumer_offsets-23 with initial high watermark 0 (kafka.cluster.Replica) [2018-05-15 04:05:25,798] INFO [Partition __consumer_offsets-23 broker=1] __consumer_offsets-23 starts at Leader Epoch 0 from offset 0. Previous Leader Epoch was: -1 (kafka.cluster.Partition) [2018-05-15 04:05:25,804] INFO Replica loaded for partition __consumer_offsets-1 with initial high watermark 0 (kafka.cluster.Replica) [2018-05-15 04:05:25,804] INFO [Partition __consumer_offsets-1 broker=1] __consumer_offsets-1 starts at Leader Epoch 0 from offset 0. Previous Leader Epoch was: -1 (kafka.cluster.Partition) [2018-05-15 04:05:25,814] INFO Replica loaded for partition __consumer_offsets-39 with initial high watermark 0 (kafka.cluster.Replica) [2018-05-15 04:05:25,814] INFO [Partition __consumer_offsets-39 broker=1] __consumer_offsets-39 starts at Leader Epoch 0 from offset 0. Previous Leader Epoch was: -1 (kafka.cluster.Partition) [2018-05-15 04:05:25,819] INFO Replica loaded for partition __consumer_offsets-20 with initial high watermark 0 (kafka.cluster.Replica) [2018-05-15 04:05:25,819] INFO [Partition __consumer_offsets-20 broker=1] __consumer_offsets-20 starts at Leader Epoch 0 from offset 0. Previous Leader Epoch was: -1 (kafka.cluster.Partition) [2018-05-15 04:05:25,823] INFO Replica loaded for partition __consumer_offsets-17 with initial high watermark 0 (kafka.cluster.Replica) [2018-05-15 04:05:25,823] INFO [Partition __consumer_offsets-17 broker=1] __consumer_offsets-17 starts at Leader Epoch 0 from offset 0. Previous Leader Epoch was: -1 (kafka.cluster.Partition) [2018-05-15 04:05:25,827] INFO Replica loaded for partition __consumer_offsets-36 with initial high watermark 0 (kafka.cluster.Replica) [2018-05-15 04:05:25,828] INFO [Partition __consumer_offsets-36 broker=1] __consumer_offsets-36 starts at Leader Epoch 0 from offset 0. Previous Leader Epoch was: -1 (kafka.cluster.Partition) [2018-05-15 04:05:25,844] INFO Replica loaded for partition __consumer_offsets-14 with initial high watermark 0 (kafka.cluster.Replica) [2018-05-15 04:05:25,844] INFO [Partition __consumer_offsets-14 broker=1] __consumer_offsets-14 starts at Leader Epoch 0 from offset 0. Previous Leader Epoch was: -1 (kafka.cluster.Partition) [2018-05-15 04:05:25,850] INFO Replica loaded for partition __consumer_offsets-33 with initial high watermark 0 (kafka.cluster.Replica) [2018-05-15 04:05:25,851] INFO [Partition __consumer_offsets-33 broker=1] __consumer_offsets-33 starts at Leader Epoch 0 from offset 0. Previous Leader Epoch was: -1 (kafka.cluster.Partition) [2018-05-15 04:05:25,856] INFO Replica loaded for partition __consumer_offsets-49 with initial high watermark 0 (kafka.cluster.Replica) [2018-05-15 04:05:25,857] INFO [Partition __consumer_offsets-49 broker=1] __consumer_offsets-49 starts at Leader Epoch 0 from offset 0. Previous Leader Epoch was: -1 (kafka.cluster.Partition) [2018-05-15 04:05:25,865] INFO Replica loaded for partition __consumer_offsets-11 with initial high watermark 0 (kafka.cluster.Replica) [2018-05-15 04:05:25,865] INFO [Partition __consumer_offsets-11 broker=1] __consumer_offsets-11 starts at Leader Epoch 0 from offset 0. Previous Leader Epoch was: -1 (kafka.cluster.Partition) [2018-05-15 04:05:25,871] INFO Replica loaded for partition __consumer_offsets-30 with initial high watermark 0 (kafka.cluster.Replica) [2018-05-15 04:05:25,871] INFO [Partition __consumer_offsets-30 broker=1] __consumer_offsets-30 starts at Leader Epoch 0 from offset 0. Previous Leader Epoch was: -1 (kafka.cluster.Partition) [2018-05-15 04:05:25,875] INFO Replica loaded for partition test-0 with initial high watermark 45 (kafka.cluster.Replica) [2018-05-15 04:05:25,875] INFO [Partition test-0 broker=1] test-0 starts at Leader Epoch 0 from offset 45. Previous Leader Epoch was: -1 (kafka.cluster.Partition) [2018-05-15 04:05:25,879] INFO Replica loaded for partition __consumer_offsets-46 with initial high watermark 0 (kafka.cluster.Replica) [2018-05-15 04:05:25,879] INFO [Partition __consumer_offsets-46 broker=1] __consumer_offsets-46 starts at Leader Epoch 0 from offset 0. Previous Leader Epoch was: -1 (kafka.cluster.Partition) [2018-05-15 04:05:25,883] INFO Replica loaded for partition __consumer_offsets-27 with initial high watermark 0 (kafka.cluster.Replica) [2018-05-15 04:05:25,884] INFO [Partition __consumer_offsets-27 broker=1] __consumer_offsets-27 starts at Leader Epoch 0 from offset 0. Previous Leader Epoch was: -1 (kafka.cluster.Partition) [2018-05-15 04:05:25,888] INFO Replica loaded for partition __consumer_offsets-8 with initial high watermark 0 (kafka.cluster.Replica) [2018-05-15 04:05:25,888] INFO [Partition __consumer_offsets-8 broker=1] __consumer_offsets-8 starts at Leader Epoch 0 from offset 0. Previous Leader Epoch was: -1 (kafka.cluster.Partition) [2018-05-15 04:05:25,891] INFO Replica loaded for partition __consumer_offsets-24 with initial high watermark 0 (kafka.cluster.Replica) [2018-05-15 04:05:25,892] INFO [Partition __consumer_offsets-24 broker=1] __consumer_offsets-24 starts at Leader Epoch 0 from offset 0. Previous Leader Epoch was: -1 (kafka.cluster.Partition) [2018-05-15 04:05:25,896] INFO Replica loaded for partition __consumer_offsets-43 with initial high watermark 0 (kafka.cluster.Replica) [2018-05-15 04:05:25,896] INFO [Partition __consumer_offsets-43 broker=1] __consumer_offsets-43 starts at Leader Epoch 0 from offset 0. Previous Leader Epoch was: -1 (kafka.cluster.Partition) [2018-05-15 04:05:25,902] INFO Replica loaded for partition __consumer_offsets-5 with initial high watermark 0 (kafka.cluster.Replica) [2018-05-15 04:05:25,903] INFO [Partition __consumer_offsets-5 broker=1] __consumer_offsets-5 starts at Leader Epoch 0 from offset 0. Previous Leader Epoch was: -1 (kafka.cluster.Partition) [2018-05-15 04:05:25,907] INFO Replica loaded for partition __consumer_offsets-21 with initial high watermark 0 (kafka.cluster.Replica) [2018-05-15 04:05:25,907] INFO [Partition __consumer_offsets-21 broker=1] __consumer_offsets-21 starts at Leader Epoch 0 from offset 0. Previous Leader Epoch was: -1 (kafka.cluster.Partition) [2018-05-15 04:05:25,911] INFO Replica loaded for partition __consumer_offsets-2 with initial high watermark 0 (kafka.cluster.Replica) [2018-05-15 04:05:25,911] INFO [Partition __consumer_offsets-2 broker=1] __consumer_offsets-2 starts at Leader Epoch 0 from offset 0. Previous Leader Epoch was: -1 (kafka.cluster.Partition) [2018-05-15 04:05:25,914] INFO Replica loaded for partition __consumer_offsets-40 with initial high watermark 0 (kafka.cluster.Replica) [2018-05-15 04:05:25,915] INFO [Partition __consumer_offsets-40 broker=1] __consumer_offsets-40 starts at Leader Epoch 0 from offset 0. Previous Leader Epoch was: -1 (kafka.cluster.Partition) [2018-05-15 04:05:25,919] INFO Replica loaded for partition __consumer_offsets-37 with initial high watermark 0 (kafka.cluster.Replica) [2018-05-15 04:05:25,919] INFO [Partition __consumer_offsets-37 broker=1] __consumer_offsets-37 starts at Leader Epoch 0 from offset 0. Previous Leader Epoch was: -1 (kafka.cluster.Partition) [2018-05-15 04:05:25,925] INFO Replica loaded for partition __consumer_offsets-18 with initial high watermark 0 (kafka.cluster.Replica) [2018-05-15 04:05:25,925] INFO [Partition __consumer_offsets-18 broker=1] __consumer_offsets-18 starts at Leader Epoch 0 from offset 0. Previous Leader Epoch was: -1 (kafka.cluster.Partition) [2018-05-15 04:05:25,930] INFO Replica loaded for partition __consumer_offsets-34 with initial high watermark 0 (kafka.cluster.Replica) [2018-05-15 04:05:25,930] INFO [Partition __consumer_offsets-34 broker=1] __consumer_offsets-34 starts at Leader Epoch 0 from offset 0. Previous Leader Epoch was: -1 (kafka.cluster.Partition) [2018-05-15 04:05:25,935] INFO Replica loaded for partition __consumer_offsets-15 with initial high watermark 0 (kafka.cluster.Replica) [2018-05-15 04:05:25,935] INFO [Partition __consumer_offsets-15 broker=1] __consumer_offsets-15 starts at Leader Epoch 0 from offset 0. Previous Leader Epoch was: -1 (kafka.cluster.Partition) [2018-05-15 04:05:25,941] INFO Replica loaded for partition __consumer_offsets-12 with initial high watermark 0 (kafka.cluster.Replica) [2018-05-15 04:05:25,941] INFO [Partition __consumer_offsets-12 broker=1] __consumer_offsets-12 starts at Leader Epoch 0 from offset 0. Previous Leader Epoch was: -1 (kafka.cluster.Partition) [2018-05-15 04:05:25,947] INFO Replica loaded for partition __consumer_offsets-31 with initial high watermark 0 (kafka.cluster.Replica) [2018-05-15 04:05:25,947] INFO [Partition __consumer_offsets-31 broker=1] __consumer_offsets-31 starts at Leader Epoch 0 from offset 0. Previous Leader Epoch was: -1 (kafka.cluster.Partition) [2018-05-15 04:05:25,950] INFO Replica loaded for partition __consumer_offsets-9 with initial high watermark 0 (kafka.cluster.Replica) [2018-05-15 04:05:25,951] INFO [Partition __consumer_offsets-9 broker=1] __consumer_offsets-9 starts at Leader Epoch 0 from offset 0. Previous Leader Epoch was: -1 (kafka.cluster.Partition) [2018-05-15 04:05:25,956] INFO Replica loaded for partition __consumer_offsets-47 with initial high watermark 0 (kafka.cluster.Replica) [2018-05-15 04:05:25,956] INFO [Partition __consumer_offsets-47 broker=1] __consumer_offsets-47 starts at Leader Epoch 0 from offset 0. Previous Leader Epoch was: -1 (kafka.cluster.Partition) [2018-05-15 04:05:25,960] INFO Replica loaded for partition __consumer_offsets-19 with initial high watermark 0 (kafka.cluster.Replica) [2018-05-15 04:05:25,961] INFO [Partition __consumer_offsets-19 broker=1] __consumer_offsets-19 starts at Leader Epoch 0 from offset 0. Previous Leader Epoch was: -1 (kafka.cluster.Partition) [2018-05-15 04:05:25,964] INFO Replica loaded for partition __consumer_offsets-28 with initial high watermark 0 (kafka.cluster.Replica) [2018-05-15 04:05:25,965] INFO [Partition __consumer_offsets-28 broker=1] __consumer_offsets-28 starts at Leader Epoch 0 from offset 0. Previous Leader Epoch was: -1 (kafka.cluster.Partition) [2018-05-15 04:05:25,968] INFO Replica loaded for partition __consumer_offsets-38 with initial high watermark 0 (kafka.cluster.Replica) [2018-05-15 04:05:25,968] INFO [Partition __consumer_offsets-38 broker=1] __consumer_offsets-38 starts at Leader Epoch 0 from offset 0. Previous Leader Epoch was: -1 (kafka.cluster.Partition) [2018-05-15 04:05:25,971] INFO Replica loaded for partition __consumer_offsets-35 with initial high watermark 0 (kafka.cluster.Replica) [2018-05-15 04:05:25,972] INFO [Partition __consumer_offsets-35 broker=1] __consumer_offsets-35 starts at Leader Epoch 0 from offset 0. Previous Leader Epoch was: -1 (kafka.cluster.Partition) [2018-05-15 04:05:25,975] INFO Replica loaded for partition __consumer_offsets-6 with initial high watermark 0 (kafka.cluster.Replica) [2018-05-15 04:05:25,975] INFO [Partition __consumer_offsets-6 broker=1] __consumer_offsets-6 starts at Leader Epoch 0 from offset 0. Previous Leader Epoch was: -1 (kafka.cluster.Partition) [2018-05-15 04:05:25,979] INFO Replica loaded for partition __consumer_offsets-44 with initial high watermark 11 (kafka.cluster.Replica) [2018-05-15 04:05:25,979] INFO [Partition __consumer_offsets-44 broker=1] __consumer_offsets-44 starts at Leader Epoch 0 from offset 11. Previous Leader Epoch was: -1 (kafka.cluster.Partition) [2018-05-15 04:05:25,983] INFO Replica loaded for partition __consumer_offsets-25 with initial high watermark 0 (kafka.cluster.Replica) [2018-05-15 04:05:25,983] INFO [Partition __consumer_offsets-25 broker=1] __consumer_offsets-25 starts at Leader Epoch 0 from offset 0. Previous Leader Epoch was: -1 (kafka.cluster.Partition) [2018-05-15 04:05:25,986] INFO Replica loaded for partition __consumer_offsets-16 with initial high watermark 0 (kafka.cluster.Replica) [2018-05-15 04:05:25,987] INFO [Partition __consumer_offsets-16 broker=1] __consumer_offsets-16 starts at Leader Epoch 0 from offset 0. Previous Leader Epoch was: -1 (kafka.cluster.Partition) [2018-05-15 04:05:25,990] INFO Replica loaded for partition __consumer_offsets-22 with initial high watermark 0 (kafka.cluster.Replica) [2018-05-15 04:05:25,991] INFO [Partition __consumer_offsets-22 broker=1] __consumer_offsets-22 starts at Leader Epoch 0 from offset 0. Previous Leader Epoch was: -1 (kafka.cluster.Partition) [2018-05-15 04:05:25,995] INFO Replica loaded for partition __consumer_offsets-41 with initial high watermark 0 (kafka.cluster.Replica) [2018-05-15 04:05:25,996] INFO [Partition __consumer_offsets-41 broker=1] __consumer_offsets-41 starts at Leader Epoch 0 from offset 0. Previous Leader Epoch was: -1 (kafka.cluster.Partition) [2018-05-15 04:05:25,999] INFO Replica loaded for partition __consumer_offsets-32 with initial high watermark 0 (kafka.cluster.Replica) [2018-05-15 04:05:25,999] INFO [Partition __consumer_offsets-32 broker=1] __consumer_offsets-32 starts at Leader Epoch 0 from offset 0. Previous Leader Epoch was: -1 (kafka.cluster.Partition) [2018-05-15 04:05:26,002] INFO Replica loaded for partition __consumer_offsets-3 with initial high watermark 0 (kafka.cluster.Replica) [2018-05-15 04:05:26,002] INFO [Partition __consumer_offsets-3 broker=1] __consumer_offsets-3 starts at Leader Epoch 0 from offset 0. Previous Leader Epoch was: -1 (kafka.cluster.Partition) [2018-05-15 04:05:26,005] INFO Replica loaded for partition __consumer_offsets-13 with initial high watermark 0 (kafka.cluster.Replica) [2018-05-15 04:05:26,006] INFO [Partition __consumer_offsets-13 broker=1] __consumer_offsets-13 starts at Leader Epoch 0 from offset 0. Previous Leader Epoch was: -1 (kafka.cluster.Partition) [2018-05-15 04:05:26,014] DEBUG Scheduling task highwatermark-checkpoint with initial delay 0 ms and period 5000 ms. (kafka.utils.KafkaScheduler) [2018-05-15 04:05:26,025] INFO [ReplicaAlterLogDirsManager on broker 1] Added fetcher for partitions List() (kafka.server.ReplicaAlterLogDirsManager) [2018-05-15 04:05:26,027] INFO [GroupMetadataManager brokerId=1] Scheduling loading of offsets and group metadata from __consumer_offsets-22 (kafka.coordinator.group.GroupMetadataManager) [2018-05-15 04:05:26,028] DEBUG Scheduling task __consumer_offsets-22 with initial delay 0 ms and period -1 ms. (kafka.utils.KafkaScheduler) [2018-05-15 04:05:26,028] INFO [GroupMetadataManager brokerId=1] Scheduling loading of offsets and group metadata from __consumer_offsets-25 (kafka.coordinator.group.GroupMetadataManager) [2018-05-15 04:05:26,029] DEBUG Scheduling task __consumer_offsets-25 with initial delay 0 ms and period -1 ms. (kafka.utils.KafkaScheduler) [2018-05-15 04:05:26,029] INFO [GroupMetadataManager brokerId=1] Scheduling loading of offsets and group metadata from __consumer_offsets-28 (kafka.coordinator.group.GroupMetadataManager) [2018-05-15 04:05:26,029] DEBUG Scheduling task __consumer_offsets-28 with initial delay 0 ms and period -1 ms. (kafka.utils.KafkaScheduler) [2018-05-15 04:05:26,029] INFO [GroupMetadataManager brokerId=1] Scheduling loading of offsets and group metadata from __consumer_offsets-31 (kafka.coordinator.group.GroupMetadataManager) [2018-05-15 04:05:26,029] DEBUG Scheduling task __consumer_offsets-31 with initial delay 0 ms and period -1 ms. (kafka.utils.KafkaScheduler) [2018-05-15 04:05:26,030] INFO [GroupMetadataManager brokerId=1] Scheduling loading of offsets and group metadata from __consumer_offsets-34 (kafka.coordinator.group.GroupMetadataManager) [2018-05-15 04:05:26,030] DEBUG Scheduling task __consumer_offsets-34 with initial delay 0 ms and period -1 ms. (kafka.utils.KafkaScheduler) [2018-05-15 04:05:26,030] INFO [GroupMetadataManager brokerId=1] Scheduling loading of offsets and group metadata from __consumer_offsets-37 (kafka.coordinator.group.GroupMetadataManager) [2018-05-15 04:05:26,030] DEBUG Scheduling task __consumer_offsets-37 with initial delay 0 ms and period -1 ms. (kafka.utils.KafkaScheduler) [2018-05-15 04:05:26,030] INFO [GroupMetadataManager brokerId=1] Scheduling loading of offsets and group metadata from __consumer_offsets-40 (kafka.coordinator.group.GroupMetadataManager) [2018-05-15 04:05:26,030] DEBUG Scheduling task __consumer_offsets-40 with initial delay 0 ms and period -1 ms. (kafka.utils.KafkaScheduler) [2018-05-15 04:05:26,031] INFO [GroupMetadataManager brokerId=1] Scheduling loading of offsets and group metadata from __consumer_offsets-43 (kafka.coordinator.group.GroupMetadataManager) [2018-05-15 04:05:26,031] DEBUG Scheduling task __consumer_offsets-43 with initial delay 0 ms and period -1 ms. (kafka.utils.KafkaScheduler) [2018-05-15 04:05:26,031] INFO [GroupMetadataManager brokerId=1] Scheduling loading of offsets and group metadata from __consumer_offsets-46 (kafka.coordinator.group.GroupMetadataManager) [2018-05-15 04:05:26,031] DEBUG Scheduling task __consumer_offsets-46 with initial delay 0 ms and period -1 ms. (kafka.utils.KafkaScheduler) [2018-05-15 04:05:26,031] INFO [GroupMetadataManager brokerId=1] Scheduling loading of offsets and group metadata from __consumer_offsets-49 (kafka.coordinator.group.GroupMetadataManager) [2018-05-15 04:05:26,031] DEBUG Scheduling task __consumer_offsets-49 with initial delay 0 ms and period -1 ms. (kafka.utils.KafkaScheduler) [2018-05-15 04:05:26,032] INFO [GroupMetadataManager brokerId=1] Scheduling loading of offsets and group metadata from __consumer_offsets-41 (kafka.coordinator.group.GroupMetadataManager) [2018-05-15 04:05:26,032] DEBUG Scheduling task __consumer_offsets-41 with initial delay 0 ms and period -1 ms. (kafka.utils.KafkaScheduler) [2018-05-15 04:05:26,032] INFO [GroupMetadataManager brokerId=1] Scheduling loading of offsets and group metadata from __consumer_offsets-44 (kafka.coordinator.group.GroupMetadataManager) [2018-05-15 04:05:26,032] DEBUG Scheduling task __consumer_offsets-44 with initial delay 0 ms and period -1 ms. (kafka.utils.KafkaScheduler) [2018-05-15 04:05:26,032] INFO [GroupMetadataManager brokerId=1] Scheduling loading of offsets and group metadata from __consumer_offsets-47 (kafka.coordinator.group.GroupMetadataManager) [2018-05-15 04:05:26,032] DEBUG Scheduling task __consumer_offsets-47 with initial delay 0 ms and period -1 ms. (kafka.utils.KafkaScheduler) [2018-05-15 04:05:26,033] INFO [GroupMetadataManager brokerId=1] Scheduling loading of offsets and group metadata from __consumer_offsets-1 (kafka.coordinator.group.GroupMetadataManager) [2018-05-15 04:05:26,033] DEBUG Scheduling task __consumer_offsets-1 with initial delay 0 ms and period -1 ms. (kafka.utils.KafkaScheduler) [2018-05-15 04:05:26,033] INFO [GroupMetadataManager brokerId=1] Scheduling loading of offsets and group metadata from __consumer_offsets-4 (kafka.coordinator.group.GroupMetadataManager) [2018-05-15 04:05:26,033] DEBUG Scheduling task __consumer_offsets-4 with initial delay 0 ms and period -1 ms. (kafka.utils.KafkaScheduler) [2018-05-15 04:05:26,033] INFO [GroupMetadataManager brokerId=1] Scheduling loading of offsets and group metadata from __consumer_offsets-7 (kafka.coordinator.group.GroupMetadataManager) [2018-05-15 04:05:26,033] DEBUG Scheduling task __consumer_offsets-7 with initial delay 0 ms and period -1 ms. (kafka.utils.KafkaScheduler) [2018-05-15 04:05:26,033] INFO [GroupMetadataManager brokerId=1] Scheduling loading of offsets and group metadata from __consumer_offsets-10 (kafka.coordinator.group.GroupMetadataManager) [2018-05-15 04:05:26,034] DEBUG Scheduling task __consumer_offsets-10 with initial delay 0 ms and period -1 ms. (kafka.utils.KafkaScheduler) [2018-05-15 04:05:26,034] INFO [GroupMetadataManager brokerId=1] Scheduling loading of offsets and group metadata from __consumer_offsets-13 (kafka.coordinator.group.GroupMetadataManager) [2018-05-15 04:05:26,034] DEBUG Scheduling task __consumer_offsets-13 with initial delay 0 ms and period -1 ms. (kafka.utils.KafkaScheduler) [2018-05-15 04:05:26,034] INFO [GroupMetadataManager brokerId=1] Scheduling loading of offsets and group metadata from __consumer_offsets-16 (kafka.coordinator.group.GroupMetadataManager) [2018-05-15 04:05:26,034] DEBUG Scheduling task __consumer_offsets-16 with initial delay 0 ms and period -1 ms. (kafka.utils.KafkaScheduler) [2018-05-15 04:05:26,034] INFO [GroupMetadataManager brokerId=1] Scheduling loading of offsets and group metadata from __consumer_offsets-19 (kafka.coordinator.group.GroupMetadataManager) [2018-05-15 04:05:26,035] DEBUG Scheduling task __consumer_offsets-19 with initial delay 0 ms and period -1 ms. (kafka.utils.KafkaScheduler) [2018-05-15 04:05:26,035] INFO [GroupMetadataManager brokerId=1] Scheduling loading of offsets and group metadata from __consumer_offsets-2 (kafka.coordinator.group.GroupMetadataManager) [2018-05-15 04:05:26,035] DEBUG Scheduling task __consumer_offsets-2 with initial delay 0 ms and period -1 ms. (kafka.utils.KafkaScheduler) [2018-05-15 04:05:26,035] INFO [GroupMetadataManager brokerId=1] Scheduling loading of offsets and group metadata from __consumer_offsets-5 (kafka.coordinator.group.GroupMetadataManager) [2018-05-15 04:05:26,035] DEBUG Scheduling task __consumer_offsets-5 with initial delay 0 ms and period -1 ms. (kafka.utils.KafkaScheduler) [2018-05-15 04:05:26,035] INFO [GroupMetadataManager brokerId=1] Scheduling loading of offsets and group metadata from __consumer_offsets-8 (kafka.coordinator.group.GroupMetadataManager) [2018-05-15 04:05:26,036] DEBUG Scheduling task __consumer_offsets-8 with initial delay 0 ms and period -1 ms. (kafka.utils.KafkaScheduler) [2018-05-15 04:05:26,036] INFO [GroupMetadataManager brokerId=1] Scheduling loading of offsets and group metadata from __consumer_offsets-11 (kafka.coordinator.group.GroupMetadataManager) [2018-05-15 04:05:26,036] DEBUG Scheduling task __consumer_offsets-11 with initial delay 0 ms and period -1 ms. (kafka.utils.KafkaScheduler) [2018-05-15 04:05:26,036] INFO [GroupMetadataManager brokerId=1] Scheduling loading of offsets and group metadata from __consumer_offsets-14 (kafka.coordinator.group.GroupMetadataManager) [2018-05-15 04:05:26,036] DEBUG Scheduling task __consumer_offsets-14 with initial delay 0 ms and period -1 ms. (kafka.utils.KafkaScheduler) [2018-05-15 04:05:26,036] INFO [GroupMetadataManager brokerId=1] Scheduling loading of offsets and group metadata from __consumer_offsets-17 (kafka.coordinator.group.GroupMetadataManager) [2018-05-15 04:05:26,037] DEBUG Scheduling task __consumer_offsets-17 with initial delay 0 ms and period -1 ms. (kafka.utils.KafkaScheduler) [2018-05-15 04:05:26,037] INFO [GroupMetadataManager brokerId=1] Scheduling loading of offsets and group metadata from __consumer_offsets-20 (kafka.coordinator.group.GroupMetadataManager) [2018-05-15 04:05:26,037] DEBUG Scheduling task __consumer_offsets-20 with initial delay 0 ms and period -1 ms. (kafka.utils.KafkaScheduler) [2018-05-15 04:05:26,037] INFO [GroupMetadataManager brokerId=1] Scheduling loading of offsets and group metadata from __consumer_offsets-23 (kafka.coordinator.group.GroupMetadataManager) [2018-05-15 04:05:26,037] DEBUG Scheduling task __consumer_offsets-23 with initial delay 0 ms and period -1 ms. (kafka.utils.KafkaScheduler) [2018-05-15 04:05:26,037] INFO [GroupMetadataManager brokerId=1] Scheduling loading of offsets and group metadata from __consumer_offsets-26 (kafka.coordinator.group.GroupMetadataManager) [2018-05-15 04:05:26,037] DEBUG Scheduling task __consumer_offsets-26 with initial delay 0 ms and period -1 ms. (kafka.utils.KafkaScheduler) [2018-05-15 04:05:26,038] INFO [GroupMetadataManager brokerId=1] Scheduling loading of offsets and group metadata from __consumer_offsets-29 (kafka.coordinator.group.GroupMetadataManager) [2018-05-15 04:05:26,038] DEBUG Scheduling task __consumer_offsets-29 with initial delay 0 ms and period -1 ms. (kafka.utils.KafkaScheduler) [2018-05-15 04:05:26,038] INFO [GroupMetadataManager brokerId=1] Scheduling loading of offsets and group metadata from __consumer_offsets-32 (kafka.coordinator.group.GroupMetadataManager) [2018-05-15 04:05:26,038] DEBUG Scheduling task __consumer_offsets-32 with initial delay 0 ms and period -1 ms. (kafka.utils.KafkaScheduler) [2018-05-15 04:05:26,038] INFO [GroupMetadataManager brokerId=1] Scheduling loading of offsets and group metadata from __consumer_offsets-35 (kafka.coordinator.group.GroupMetadataManager) [2018-05-15 04:05:26,038] DEBUG Scheduling task __consumer_offsets-35 with initial delay 0 ms and period -1 ms. (kafka.utils.KafkaScheduler) [2018-05-15 04:05:26,039] INFO [GroupMetadataManager brokerId=1] Scheduling loading of offsets and group metadata from __consumer_offsets-38 (kafka.coordinator.group.GroupMetadataManager) [2018-05-15 04:05:26,039] DEBUG Scheduling task __consumer_offsets-38 with initial delay 0 ms and period -1 ms. (kafka.utils.KafkaScheduler) [2018-05-15 04:05:26,039] INFO [GroupMetadataManager brokerId=1] Scheduling loading of offsets and group metadata from __consumer_offsets-0 (kafka.coordinator.group.GroupMetadataManager) [2018-05-15 04:05:26,039] DEBUG Scheduling task __consumer_offsets-0 with initial delay 0 ms and period -1 ms. (kafka.utils.KafkaScheduler) [2018-05-15 04:05:26,039] INFO [GroupMetadataManager brokerId=1] Scheduling loading of offsets and group metadata from __consumer_offsets-3 (kafka.coordinator.group.GroupMetadataManager) [2018-05-15 04:05:26,039] DEBUG Scheduling task __consumer_offsets-3 with initial delay 0 ms and period -1 ms. (kafka.utils.KafkaScheduler) [2018-05-15 04:05:26,040] INFO [GroupMetadataManager brokerId=1] Scheduling loading of offsets and group metadata from __consumer_offsets-6 (kafka.coordinator.group.GroupMetadataManager) [2018-05-15 04:05:26,040] DEBUG Scheduling task __consumer_offsets-6 with initial delay 0 ms and period -1 ms. (kafka.utils.KafkaScheduler) [2018-05-15 04:05:26,040] INFO [GroupMetadataManager brokerId=1] Scheduling loading of offsets and group metadata from __consumer_offsets-9 (kafka.coordinator.group.GroupMetadataManager) [2018-05-15 04:05:26,040] DEBUG Scheduling task __consumer_offsets-9 with initial delay 0 ms and period -1 ms. (kafka.utils.KafkaScheduler) [2018-05-15 04:05:26,040] INFO [GroupMetadataManager brokerId=1] Scheduling loading of offsets and group metadata from __consumer_offsets-12 (kafka.coordinator.group.GroupMetadataManager) [2018-05-15 04:05:26,040] DEBUG Scheduling task __consumer_offsets-12 with initial delay 0 ms and period -1 ms. (kafka.utils.KafkaScheduler) [2018-05-15 04:05:26,041] INFO [GroupMetadataManager brokerId=1] Scheduling loading of offsets and group metadata from __consumer_offsets-15 (kafka.coordinator.group.GroupMetadataManager) [2018-05-15 04:05:26,041] DEBUG Scheduling task __consumer_offsets-15 with initial delay 0 ms and period -1 ms. (kafka.utils.KafkaScheduler) [2018-05-15 04:05:26,041] INFO [GroupMetadataManager brokerId=1] Scheduling loading of offsets and group metadata from __consumer_offsets-18 (kafka.coordinator.group.GroupMetadataManager) [2018-05-15 04:05:26,041] DEBUG Scheduling task __consumer_offsets-18 with initial delay 0 ms and period -1 ms. (kafka.utils.KafkaScheduler) [2018-05-15 04:05:26,041] INFO [GroupMetadataManager brokerId=1] Scheduling loading of offsets and group metadata from __consumer_offsets-21 (kafka.coordinator.group.GroupMetadataManager) [2018-05-15 04:05:26,041] DEBUG Scheduling task __consumer_offsets-21 with initial delay 0 ms and period -1 ms. (kafka.utils.KafkaScheduler) [2018-05-15 04:05:26,041] INFO [GroupMetadataManager brokerId=1] Scheduling loading of offsets and group metadata from __consumer_offsets-24 (kafka.coordinator.group.GroupMetadataManager) [2018-05-15 04:05:26,042] DEBUG Scheduling task __consumer_offsets-24 with initial delay 0 ms and period -1 ms. (kafka.utils.KafkaScheduler) [2018-05-15 04:05:26,042] INFO [GroupMetadataManager brokerId=1] Scheduling loading of offsets and group metadata from __consumer_offsets-27 (kafka.coordinator.group.GroupMetadataManager) [2018-05-15 04:05:26,042] DEBUG Scheduling task __consumer_offsets-27 with initial delay 0 ms and period -1 ms. (kafka.utils.KafkaScheduler) [2018-05-15 04:05:26,042] INFO [GroupMetadataManager brokerId=1] Scheduling loading of offsets and group metadata from __consumer_offsets-30 (kafka.coordinator.group.GroupMetadataManager) [2018-05-15 04:05:26,042] DEBUG Scheduling task __consumer_offsets-30 with initial delay 0 ms and period -1 ms. (kafka.utils.KafkaScheduler) [2018-05-15 04:05:26,042] INFO [GroupMetadataManager brokerId=1] Scheduling loading of offsets and group metadata from __consumer_offsets-33 (kafka.coordinator.group.GroupMetadataManager) [2018-05-15 04:05:26,042] DEBUG Scheduling task __consumer_offsets-33 with initial delay 0 ms and period -1 ms. (kafka.utils.KafkaScheduler) [2018-05-15 04:05:26,043] INFO [GroupMetadataManager brokerId=1] Scheduling loading of offsets and group metadata from __consumer_offsets-36 (kafka.coordinator.group.GroupMetadataManager) [2018-05-15 04:05:26,043] DEBUG Scheduling task __consumer_offsets-36 with initial delay 0 ms and period -1 ms. (kafka.utils.KafkaScheduler) [2018-05-15 04:05:26,043] INFO [GroupMetadataManager brokerId=1] Scheduling loading of offsets and group metadata from __consumer_offsets-39 (kafka.coordinator.group.GroupMetadataManager) [2018-05-15 04:05:26,043] DEBUG Scheduling task __consumer_offsets-39 with initial delay 0 ms and period -1 ms. (kafka.utils.KafkaScheduler) [2018-05-15 04:05:26,043] INFO [GroupMetadataManager brokerId=1] Scheduling loading of offsets and group metadata from __consumer_offsets-42 (kafka.coordinator.group.GroupMetadataManager) [2018-05-15 04:05:26,043] DEBUG Scheduling task __consumer_offsets-42 with initial delay 0 ms and period -1 ms. (kafka.utils.KafkaScheduler) [2018-05-15 04:05:26,043] INFO [GroupMetadataManager brokerId=1] Scheduling loading of offsets and group metadata from __consumer_offsets-45 (kafka.coordinator.group.GroupMetadataManager) [2018-05-15 04:05:26,044] DEBUG Scheduling task __consumer_offsets-45 with initial delay 0 ms and period -1 ms. (kafka.utils.KafkaScheduler) [2018-05-15 04:05:26,044] INFO [GroupMetadataManager brokerId=1] Scheduling loading of offsets and group metadata from __consumer_offsets-48 (kafka.coordinator.group.GroupMetadataManager) [2018-05-15 04:05:26,044] DEBUG Scheduling task __consumer_offsets-48 with initial delay 0 ms and period -1 ms. (kafka.utils.KafkaScheduler) [2018-05-15 04:05:26,058] INFO [GroupMetadataManager brokerId=1] Finished loading offsets and group metadata from __consumer_offsets-22 in 27 milliseconds. (kafka.coordinator.group.GroupMetadataManager) [2018-05-15 04:05:26,064] INFO [GroupMetadataManager brokerId=1] Finished loading offsets and group metadata from __consumer_offsets-25 in 0 milliseconds. (kafka.coordinator.group.GroupMetadataManager) [2018-05-15 04:05:26,064] INFO [GroupMetadataManager brokerId=1] Finished loading offsets and group metadata from __consumer_offsets-28 in 0 milliseconds. (kafka.coordinator.group.GroupMetadataManager) [2018-05-15 04:05:26,065] INFO [GroupMetadataManager brokerId=1] Finished loading offsets and group metadata from __consumer_offsets-31 in 1 milliseconds. (kafka.coordinator.group.GroupMetadataManager) [2018-05-15 04:05:26,065] INFO [GroupMetadataManager brokerId=1] Finished loading offsets and group metadata from __consumer_offsets-34 in 0 milliseconds. (kafka.coordinator.group.GroupMetadataManager) [2018-05-15 04:05:26,065] INFO [GroupMetadataManager brokerId=1] Finished loading offsets and group metadata from __consumer_offsets-37 in 0 milliseconds. (kafka.coordinator.group.GroupMetadataManager) [2018-05-15 04:05:26,066] INFO [GroupMetadataManager brokerId=1] Finished loading offsets and group metadata from __consumer_offsets-40 in 1 milliseconds. (kafka.coordinator.group.GroupMetadataManager) [2018-05-15 04:05:26,066] INFO [GroupMetadataManager brokerId=1] Finished loading offsets and group metadata from __consumer_offsets-43 in 0 milliseconds. (kafka.coordinator.group.GroupMetadataManager) [2018-05-15 04:05:26,066] INFO [GroupMetadataManager brokerId=1] Finished loading offsets and group metadata from __consumer_offsets-46 in 0 milliseconds. (kafka.coordinator.group.GroupMetadataManager) [2018-05-15 04:05:26,067] INFO [GroupMetadataManager brokerId=1] Finished loading offsets and group metadata from __consumer_offsets-49 in 0 milliseconds. (kafka.coordinator.group.GroupMetadataManager) [2018-05-15 04:05:26,067] INFO [GroupMetadataManager brokerId=1] Finished loading offsets and group metadata from __consumer_offsets-41 in 0 milliseconds. (kafka.coordinator.group.GroupMetadataManager) [2018-05-15 04:05:26,114] INFO [GroupCoordinator 1]: Loading group metadata for console-consumer-91157 with generation 2 (kafka.coordinator.group.GroupCoordinator) [2018-05-15 04:05:26,115] INFO [GroupMetadataManager brokerId=1] Finished loading offsets and group metadata from __consumer_offsets-44 in 48 milliseconds. (kafka.coordinator.group.GroupMetadataManager) [2018-05-15 04:05:26,116] INFO [GroupMetadataManager brokerId=1] Finished loading offsets and group metadata from __consumer_offsets-47 in 0 milliseconds. (kafka.coordinator.group.GroupMetadataManager) [2018-05-15 04:05:26,116] INFO [GroupMetadataManager brokerId=1] Finished loading offsets and group metadata from __consumer_offsets-1 in 0 milliseconds. (kafka.coordinator.group.GroupMetadataManager) [2018-05-15 04:05:26,122] INFO [GroupCoordinator 1]: Loading group metadata for console-consumer-23648 with generation 2 (kafka.coordinator.group.GroupCoordinator) [2018-05-15 04:05:26,122] INFO [GroupMetadataManager brokerId=1] Finished loading offsets and group metadata from __consumer_offsets-4 in 6 milliseconds. (kafka.coordinator.group.GroupMetadataManager) [2018-05-15 04:05:26,122] INFO [GroupMetadataManager brokerId=1] Finished loading offsets and group metadata from __consumer_offsets-7 in 0 milliseconds. (kafka.coordinator.group.GroupMetadataManager) [2018-05-15 04:05:26,123] INFO [GroupMetadataManager brokerId=1] Finished loading offsets and group metadata from __consumer_offsets-10 in 1 milliseconds. (kafka.coordinator.group.GroupMetadataManager) [2018-05-15 04:05:26,123] INFO [GroupMetadataManager brokerId=1] Finished loading offsets and group metadata from __consumer_offsets-13 in 0 milliseconds. (kafka.coordinator.group.GroupMetadataManager) [2018-05-15 04:05:26,123] INFO [GroupMetadataManager brokerId=1] Finished loading offsets and group metadata from __consumer_offsets-16 in 0 milliseconds. (kafka.coordinator.group.GroupMetadataManager) [2018-05-15 04:05:26,123] INFO [GroupMetadataManager brokerId=1] Finished loading offsets and group metadata from __consumer_offsets-19 in 0 milliseconds. (kafka.coordinator.group.GroupMetadataManager) [2018-05-15 04:05:26,124] INFO [GroupMetadataManager brokerId=1] Finished loading offsets and group metadata from __consumer_offsets-2 in 0 milliseconds. (kafka.coordinator.group.GroupMetadataManager) [2018-05-15 04:05:26,124] INFO [GroupMetadataManager brokerId=1] Finished loading offsets and group metadata from __consumer_offsets-5 in 0 milliseconds. (kafka.coordinator.group.GroupMetadataManager) [2018-05-15 04:05:26,124] INFO [GroupMetadataManager brokerId=1] Finished loading offsets and group metadata from __consumer_offsets-8 in 0 milliseconds. (kafka.coordinator.group.GroupMetadataManager) [2018-05-15 04:05:26,124] INFO [GroupMetadataManager brokerId=1] Finished loading offsets and group metadata from __consumer_offsets-11 in 0 milliseconds. (kafka.coordinator.group.GroupMetadataManager) [2018-05-15 04:05:26,125] INFO [GroupMetadataManager brokerId=1] Finished loading offsets and group metadata from __consumer_offsets-14 in 0 milliseconds. (kafka.coordinator.group.GroupMetadataManager) [2018-05-15 04:05:26,125] INFO [GroupMetadataManager brokerId=1] Finished loading offsets and group metadata from __consumer_offsets-17 in 0 milliseconds. (kafka.coordinator.group.GroupMetadataManager) [2018-05-15 04:05:26,125] INFO [GroupMetadataManager brokerId=1] Finished loading offsets and group metadata from __consumer_offsets-20 in 0 milliseconds. (kafka.coordinator.group.GroupMetadataManager) [2018-05-15 04:05:26,126] INFO [GroupMetadataManager brokerId=1] Finished loading offsets and group metadata from __consumer_offsets-23 in 0 milliseconds. (kafka.coordinator.group.GroupMetadataManager) [2018-05-15 04:05:26,126] INFO [GroupMetadataManager brokerId=1] Finished loading offsets and group metadata from __consumer_offsets-26 in 0 milliseconds. (kafka.coordinator.group.GroupMetadataManager) [2018-05-15 04:05:26,126] INFO [GroupMetadataManager brokerId=1] Finished loading offsets and group metadata from __consumer_offsets-29 in 0 milliseconds. (kafka.coordinator.group.GroupMetadataManager) [2018-05-15 04:05:26,127] INFO [GroupMetadataManager brokerId=1] Finished loading offsets and group metadata from __consumer_offsets-32 in 1 milliseconds. (kafka.coordinator.group.GroupMetadataManager) [2018-05-15 04:05:26,127] INFO [GroupMetadataManager brokerId=1] Finished loading offsets and group metadata from __consumer_offsets-35 in 0 milliseconds. (kafka.coordinator.group.GroupMetadataManager) [2018-05-15 04:05:26,127] INFO [GroupMetadataManager brokerId=1] Finished loading offsets and group metadata from __consumer_offsets-38 in 0 milliseconds. (kafka.coordinator.group.GroupMetadataManager) [2018-05-15 04:05:26,128] INFO [GroupMetadataManager brokerId=1] Finished loading offsets and group metadata from __consumer_offsets-0 in 0 milliseconds. (kafka.coordinator.group.GroupMetadataManager) [2018-05-15 04:05:26,128] INFO [GroupMetadataManager brokerId=1] Finished loading offsets and group metadata from __consumer_offsets-3 in 0 milliseconds. (kafka.coordinator.group.GroupMetadataManager) [2018-05-15 04:05:26,128] INFO [GroupMetadataManager brokerId=1] Finished loading offsets and group metadata from __consumer_offsets-6 in 0 milliseconds. (kafka.coordinator.group.GroupMetadataManager) [2018-05-15 04:05:26,129] INFO [GroupMetadataManager brokerId=1] Finished loading offsets and group metadata from __consumer_offsets-9 in 0 milliseconds. (kafka.coordinator.group.GroupMetadataManager) [2018-05-15 04:05:26,129] INFO [GroupMetadataManager brokerId=1] Finished loading offsets and group metadata from __consumer_offsets-12 in 0 milliseconds. (kafka.coordinator.group.GroupMetadataManager) [2018-05-15 04:05:26,129] INFO [GroupMetadataManager brokerId=1] Finished loading offsets and group metadata from __consumer_offsets-15 in 0 milliseconds. (kafka.coordinator.group.GroupMetadataManager) [2018-05-15 04:05:26,129] INFO [GroupMetadataManager brokerId=1] Finished loading offsets and group metadata from __consumer_offsets-18 in 0 milliseconds. (kafka.coordinator.group.GroupMetadataManager) [2018-05-15 04:05:26,130] INFO [GroupMetadataManager brokerId=1] Finished loading offsets and group metadata from __consumer_offsets-21 in 1 milliseconds. (kafka.coordinator.group.GroupMetadataManager) [2018-05-15 04:05:26,130] INFO [GroupMetadataManager brokerId=1] Finished loading offsets and group metadata from __consumer_offsets-24 in 0 milliseconds. (kafka.coordinator.group.GroupMetadataManager) [2018-05-15 04:05:26,131] INFO [GroupMetadataManager brokerId=1] Finished loading offsets and group metadata from __consumer_offsets-27 in 1 milliseconds. (kafka.coordinator.group.GroupMetadataManager) [2018-05-15 04:05:26,131] INFO [GroupMetadataManager brokerId=1] Finished loading offsets and group metadata from __consumer_offsets-30 in 0 milliseconds. (kafka.coordinator.group.GroupMetadataManager) [2018-05-15 04:05:26,131] INFO [GroupMetadataManager brokerId=1] Finished loading offsets and group metadata from __consumer_offsets-33 in 0 milliseconds. (kafka.coordinator.group.GroupMetadataManager) [2018-05-15 04:05:26,132] INFO [GroupMetadataManager brokerId=1] Finished loading offsets and group metadata from __consumer_offsets-36 in 1 milliseconds. (kafka.coordinator.group.GroupMetadataManager) [2018-05-15 04:05:26,132] INFO [GroupMetadataManager brokerId=1] Finished loading offsets and group metadata from __consumer_offsets-39 in 0 milliseconds. (kafka.coordinator.group.GroupMetadataManager) [2018-05-15 04:05:26,133] INFO [GroupMetadataManager brokerId=1] Finished loading offsets and group metadata from __consumer_offsets-42 in 1 milliseconds. (kafka.coordinator.group.GroupMetadataManager) [2018-05-15 04:05:26,133] INFO [GroupMetadataManager brokerId=1] Finished loading offsets and group metadata from __consumer_offsets-45 in 0 milliseconds. (kafka.coordinator.group.GroupMetadataManager) [2018-05-15 04:05:26,133] INFO [GroupMetadataManager brokerId=1] Finished loading offsets and group metadata from __consumer_offsets-48 in 0 milliseconds. (kafka.coordinator.group.GroupMetadataManager) [2018-05-15 04:05:30,665] DEBUG Scheduling task auto-leader-rebalance-task with initial delay 300000 ms and period -1000 ms. (kafka.utils.KafkaScheduler) [2018-05-15 04:05:54,659] DEBUG Beginning log cleanup... (kafka.log.LogManager) [2018-05-15 04:05:54,660] DEBUG Garbage collecting 'test1-0' (kafka.log.LogManager) [2018-05-15 04:05:54,662] DEBUG Checking for dirty logs to flush... (kafka.log.LogManager) [2018-05-15 04:05:54,663] DEBUG Scheduling task kafka-delete-logs with initial delay 60000 ms and period -1 ms. (kafka.utils.KafkaScheduler) [2018-05-15 04:05:54,664] DEBUG Garbage collecting 'test-0' (kafka.log.LogManager) [2018-05-15 04:05:54,664] DEBUG Checking if flush is needed on __consumer_offsets flush interval 9223372036854775807 last flushed 1526353522453 time since last flush: 32211 (kafka.log.LogManager) [2018-05-15 04:05:54,665] DEBUG Checking if flush is needed on __consumer_offsets flush interval 9223372036854775807 last flushed 1526353524104 time since last flush: 30561 (kafka.log.LogManager) [2018-05-15 04:05:54,665] DEBUG Checking if flush is needed on __consumer_offsets flush interval 9223372036854775807 last flushed 1526353524519 time since last flush: 30146 (kafka.log.LogManager) [2018-05-15 04:05:54,665] DEBUG Checking if flush is needed on __consumer_offsets flush interval 9223372036854775807 last flushed 1526353523968 time since last flush: 30697 (kafka.log.LogManager) [2018-05-15 04:05:54,665] DEBUG Checking if flush is needed on __consumer_offsets flush interval 9223372036854775807 last flushed 1526353522872 time since last flush: 31793 (kafka.log.LogManager) [2018-05-15 04:05:54,665] DEBUG Checking if flush is needed on __consumer_offsets flush interval 9223372036854775807 last flushed 1526353522605 time since last flush: 32060 (kafka.log.LogManager) [2018-05-15 04:05:54,665] DEBUG Checking if flush is needed on __consumer_offsets flush interval 9223372036854775807 last flushed 1526353523279 time since last flush: 31386 (kafka.log.LogManager) [2018-05-15 04:05:54,666] DEBUG Checking if flush is needed on __consumer_offsets flush interval 9223372036854775807 last flushed 1526353523138 time since last flush: 31528 (kafka.log.LogManager) [2018-05-15 04:05:54,666] DEBUG Checking if flush is needed on __consumer_offsets flush interval 9223372036854775807 last flushed 1526353524291 time since last flush: 30375 (kafka.log.LogManager) [2018-05-15 04:05:54,666] DEBUG Checking if flush is needed on __consumer_offsets flush interval 9223372036854775807 last flushed 1526353523691 time since last flush: 30975 (kafka.log.LogManager) [2018-05-15 04:05:54,666] INFO [Log partition=test-0, dir=C:\kafka1] Found deletable segments with base offsets [0] due to retention time 720000ms breach (kafka.log.Log) [2018-05-15 04:05:54,666] DEBUG Checking if flush is needed on __consumer_offsets flush interval 9223372036854775807 last flushed 1526353522313 time since last flush: 32353 (kafka.log.LogManager) [2018-05-15 04:05:54,666] DEBUG Checking if flush is needed on __consumer_offsets flush interval 9223372036854775807 last flushed 1526353523546 time since last flush: 31120 (kafka.log.LogManager) [2018-05-15 04:05:54,666] DEBUG Checking if flush is needed on __consumer_offsets flush interval 9223372036854775807 last flushed 1526353522592 time since last flush: 32074 (kafka.log.LogManager) [2018-05-15 04:05:54,667] DEBUG Checking if flush is needed on __consumer_offsets flush interval 9223372036854775807 last flushed 1526353524099 time since last flush: 30568 (kafka.log.LogManager) [2018-05-15 04:05:54,667] DEBUG Checking if flush is needed on __consumer_offsets flush interval 9223372036854775807 last flushed 1526353522452 time since last flush: 32215 (kafka.log.LogManager) [2018-05-15 04:05:54,667] DEBUG Checking if flush is needed on __consumer_offsets flush interval 9223372036854775807 last flushed 1526353523840 time since last flush: 30827 (kafka.log.LogManager) [2018-05-15 04:05:54,667] INFO [Log partition=test-0, dir=C:\kafka1] Scheduling log segment [baseOffset 0, size 2290] for deletion. (kafka.log.Log) [2018-05-15 04:05:54,667] DEBUG Checking if flush is needed on __consumer_offsets flush interval 9223372036854775807 last flushed 1526353523013 time since last flush: 31654 (kafka.log.LogManager) [2018-05-15 04:05:54,667] DEBUG Checking if flush is needed on __consumer_offsets flush interval 9223372036854775807 last flushed 1526353522854 time since last flush: 31813 (kafka.log.LogManager) [2018-05-15 04:05:54,668] DEBUG Checking if flush is needed on __consumer_offsets flush interval 9223372036854775807 last flushed 1526353524238 time since last flush: 30430 (kafka.log.LogManager) [2018-05-15 04:05:54,668] DEBUG Checking if flush is needed on __consumer_offsets flush interval 9223372036854775807 last flushed 1526353522089 time since last flush: 32579 (kafka.log.LogManager) [2018-05-15 04:05:54,668] DEBUG Checking if flush is needed on test1 flush interval 9223372036854775807 last flushed 1526353522089 time since last flush: 32579 (kafka.log.LogManager) [2018-05-15 04:05:54,668] DEBUG Checking if flush is needed on __consumer_offsets flush interval 9223372036854775807 last flushed 1526353523268 time since last flush: 31400 (kafka.log.LogManager) [2018-05-15 04:05:54,668] DEBUG Checking if flush is needed on __consumer_offsets flush interval 9223372036854775807 last flushed 1526353523014 time since last flush: 31654 (kafka.log.LogManager) [2018-05-15 04:05:54,668] DEBUG Checking if flush is needed on __consumer_offsets flush interval 9223372036854775807 last flushed 1526353524453 time since last flush: 30215 (kafka.log.LogManager) [2018-05-15 04:05:54,668] DEBUG Checking if flush is needed on __consumer_offsets flush interval 9223372036854775807 last flushed 1526353523679 time since last flush: 30989 (kafka.log.LogManager) [2018-05-15 04:05:54,669] DEBUG Checking if flush is needed on __consumer_offsets flush interval 9223372036854775807 last flushed 1526353523811 time since last flush: 30858 (kafka.log.LogManager) [2018-05-15 04:05:54,669] DEBUG Checking if flush is needed on __consumer_offsets flush interval 9223372036854775807 last flushed 1526353523420 time since last flush: 31249 (kafka.log.LogManager) [2018-05-15 04:05:54,669] DEBUG Checking if flush is needed on __consumer_offsets flush interval 9223372036854775807 last flushed 1526353522582 time since last flush: 32087 (kafka.log.LogManager) [2018-05-15 04:05:54,669] DEBUG Checking if flush is needed on __consumer_offsets flush interval 9223372036854775807 last flushed 1526353524235 time since last flush: 30434 (kafka.log.LogManager) [2018-05-15 04:05:54,686] DEBUG Checking if flush is needed on __consumer_offsets flush interval 9223372036854775807 last flushed 1526353522328 time since last flush: 32358 (kafka.log.LogManager) [2018-05-15 04:05:54,686] ERROR Outer Exception occured for ATOMIC_MOVE (org.apache.kafka.common.utils.Utils) java.nio.file.FileSystemException: C:\kafka1\test-0\00000000000000000000.log -> C:\kafka1\test-0\00000000000000000000.log.deleted: The process cannot access the file because it is being used by another process. at sun.nio.fs.WindowsException.translateToIOException(WindowsException.java:86) at sun.nio.fs.WindowsException.rethrowAsIOException(WindowsException.java:97) at sun.nio.fs.WindowsFileCopy.move(WindowsFileCopy.java:301) at sun.nio.fs.WindowsFileSystemProvider.move(WindowsFileSystemProvider.java:287) at java.nio.file.Files.move(Files.java:1395) at org.apache.kafka.common.utils.Utils.atomicMoveWithFallback(Utils.java:696) at org.apache.kafka.common.record.FileRecords.renameTo(FileRecords.java:212) at kafka.log.LogSegment.changeFileSuffixes(LogSegment.scala:416) at kafka.log.Log.kafka$log$Log$$asyncDeleteSegment(Log.scala:1601) at kafka.log.Log.kafka$log$Log$$deleteSegment(Log.scala:1588) at kafka.log.Log$$anonfun$deleteSegments$1$$anonfun$apply$mcI$sp$1.apply(Log.scala:1170) at kafka.log.Log$$anonfun$deleteSegments$1$$anonfun$apply$mcI$sp$1.apply(Log.scala:1170) at scala.collection.mutable.ResizableArray$class.foreach(ResizableArray.scala:59) at scala.collection.mutable.ArrayBuffer.foreach(ArrayBuffer.scala:48) at kafka.log.Log$$anonfun$deleteSegments$1.apply$mcI$sp(Log.scala:1170) at kafka.log.Log$$anonfun$deleteSegments$1.apply(Log.scala:1161) at kafka.log.Log$$anonfun$deleteSegments$1.apply(Log.scala:1161) at kafka.log.Log.maybeHandleIOException(Log.scala:1678) at kafka.log.Log.deleteSegments(Log.scala:1161) at kafka.log.Log.deleteOldSegments(Log.scala:1156) at kafka.log.Log.deleteRetentionMsBreachedSegments(Log.scala:1228) at kafka.log.Log.deleteOldSegments(Log.scala:1222) at kafka.log.LogManager$$anonfun$cleanupLogs$3.apply(LogManager.scala:854) at kafka.log.LogManager$$anonfun$cleanupLogs$3.apply(LogManager.scala:852) at scala.collection.TraversableLike$WithFilter$$anonfun$foreach$1.apply(TraversableLike.scala:733) at scala.collection.immutable.List.foreach(List.scala:392) at scala.collection.TraversableLike$WithFilter.foreach(TraversableLike.scala:732) at kafka.log.LogManager.cleanupLogs(LogManager.scala:852) at kafka.log.LogManager$$anonfun$startup$1.apply$mcV$sp(LogManager.scala:385) at kafka.utils.KafkaScheduler$$anonfun$1.apply$mcV$sp(KafkaScheduler.scala:110) at kafka.utils.CoreUtils$$anon$1.run(CoreUtils.scala:62) at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:511) at java.util.concurrent.FutureTask.runAndReset(FutureTask.java:308) at java.util.concurrent.ScheduledThreadPoolExecutor$ScheduledFutureTask.access$301(ScheduledThreadPoolExecutor.java:180) at java.util.concurrent.ScheduledThreadPoolExecutor$ScheduledFutureTask.run(ScheduledThreadPoolExecutor.java:294) at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149) at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624) at java.lang.Thread.run(Thread.java:748) [2018-05-15 04:05:54,686] DEBUG Checking if flush is needed on __consumer_offsets flush interval 9223372036854775807 last flushed 1526353523998 time since last flush: 30688 (kafka.log.LogManager) [2018-05-15 04:05:54,694] DEBUG Checking if flush is needed on __consumer_offsets flush interval 9223372036854775807 last flushed 1526353523008 time since last flush: 31686 (kafka.log.LogManager) [2018-05-15 04:05:54,694] DEBUG Checking if flush is needed on __consumer_offsets flush interval 9223372036854775807 last flushed 1526353522725 time since last flush: 31969 (kafka.log.LogManager) [2018-05-15 04:05:54,695] DEBUG Checking if flush is needed on __consumer_offsets flush interval 9223372036854775807 last flushed 1526353523410 time since last flush: 31285 (kafka.log.LogManager) [2018-05-15 04:05:54,695] DEBUG Checking if flush is needed on __consumer_offsets flush interval 9223372036854775807 last flushed 1526353523144 time since last flush: 31551 (kafka.log.LogManager) [2018-05-15 04:05:54,695] DEBUG Checking if flush is needed on __consumer_offsets flush interval 9223372036854775807 last flushed 1526353524379 time since last flush: 30316 (kafka.log.LogManager) [2018-05-15 04:05:54,695] DEBUG Checking if flush is needed on __consumer_offsets flush interval 9223372036854775807 last flushed 1526353523825 time since last flush: 30870 (kafka.log.LogManager) [2018-05-15 04:05:54,695] DEBUG Checking if flush is needed on __consumer_offsets flush interval 9223372036854775807 last flushed 1526353523278 time since last flush: 31417 (kafka.log.LogManager) [2018-05-15 04:05:54,696] DEBUG Checking if flush is needed on __consumer_offsets flush interval 9223372036854775807 last flushed 1526353523567 time since last flush: 31129 (kafka.log.LogManager) [2018-05-15 04:05:54,697] DEBUG Checking if flush is needed on __consumer_offsets flush interval 9223372036854775807 last flushed 1526353524161 time since last flush: 30536 (kafka.log.LogManager) [2018-05-15 04:05:54,697] DEBUG Checking if flush is needed on __consumer_offsets flush interval 9223372036854775807 last flushed 1526353522464 time since last flush: 32233 (kafka.log.LogManager) [2018-05-15 04:05:54,697] DEBUG Checking if flush is needed on __consumer_offsets flush interval 9223372036854775807 last flushed 1526353523976 time since last flush: 30721 (kafka.log.LogManager) [2018-05-15 04:05:54,697] DEBUG Checking if flush is needed on test flush interval 9223372036854775807 last flushed 1526353522089 time since last flush: 32608 (kafka.log.LogManager) [2018-05-15 04:05:54,698] DEBUG Checking if flush is needed on __consumer_offsets flush interval 9223372036854775807 last flushed 1526353522322 time since last flush: 32376 (kafka.log.LogManager) [2018-05-15 04:05:54,698] DEBUG Checking if flush is needed on __consumer_offsets flush interval 9223372036854775807 last flushed 1526353522874 time since last flush: 31824 (kafka.log.LogManager) [2018-05-15 04:05:54,698] DEBUG Checking if flush is needed on __consumer_offsets flush interval 9223372036854775807 last flushed 1526353522720 time since last flush: 31978 (kafka.log.LogManager) [2018-05-15 04:05:54,698] DEBUG Checking if flush is needed on __consumer_offsets flush interval 9223372036854775807 last flushed 1526353523408 time since last flush: 31290 (kafka.log.LogManager) [2018-05-15 04:05:54,699] DEBUG Checking if flush is needed on __consumer_offsets flush interval 9223372036854775807 last flushed 1526353523141 time since last flush: 31558 (kafka.log.LogManager) [2018-05-15 04:05:54,699] DEBUG Checking if flush is needed on __consumer_offsets flush interval 9223372036854775807 last flushed 1526353523703 time since last flush: 30996 (kafka.log.LogManager) [2018-05-15 04:05:54,699] DEBUG Checking if flush is needed on __consumer_offsets flush interval 9223372036854775807 last flushed 1526353524370 time since last flush: 30329 (kafka.log.LogManager) [2018-05-15 04:05:54,699] DEBUG Checking if flush is needed on __consumer_offsets flush interval 9223372036854775807 last flushed 1526353523556 time since last flush: 31143 (kafka.log.LogManager) [2018-05-15 04:05:54,700] DEBUG Checking if flush is needed on __consumer_offsets flush interval 9223372036854775807 last flushed 1526353522736 time since last flush: 31964 (kafka.log.LogManager) [2018-05-15 04:05:54,702] ERROR Thread Locks... main : class java.lang.ref.SoftReference java.lang.ref.SoftReference@7159c725 main : class [Lsun.nio.fs.NativeBuffer; [Lsun.nio.fs.NativeBuffer;@685cee2 main : class com.yammer.metrics.stats.ThreadLocalRandom com.yammer.metrics.stats.ThreadLocalRandom@69f82438 main : class sun.nio.ch.Util$BufferCache sun.nio.ch.Util$BufferCache@6e4236e3 main : class java.lang.ref.SoftReference java.lang.ref.SoftReference@511db4e1 main : class sun.misc.FloatingDecimal$BinaryToASCIIBuffer sun.misc.FloatingDecimal$BinaryToASCIIBuffer@794792fe main : class java.lang.ref.SoftReference java.lang.ref.SoftReference@5136b00a (org.apache.kafka.common.utils.Utils) [2018-05-15 04:05:54,705] ERROR Error while deleting segments for test-0 in dir C:\kafka1 (kafka.server.LogDirFailureChannel) java.nio.file.FileSystemException: C:\kafka1\test-0\00000000000000000000.log -> C:\kafka1\test-0\00000000000000000000.log.deleted: The process cannot access the file because it is being used by another process. at sun.nio.fs.WindowsException.translateToIOException(WindowsException.java:86) at sun.nio.fs.WindowsException.rethrowAsIOException(WindowsException.java:97) at sun.nio.fs.WindowsFileCopy.move(WindowsFileCopy.java:387) at sun.nio.fs.WindowsFileSystemProvider.move(WindowsFileSystemProvider.java:287) at java.nio.file.Files.move(Files.java:1395) at org.apache.kafka.common.utils.Utils.atomicMoveWithFallback(Utils.java:706) at org.apache.kafka.common.record.FileRecords.renameTo(FileRecords.java:212) at kafka.log.LogSegment.changeFileSuffixes(LogSegment.scala:416) at kafka.log.Log.kafka$log$Log$$asyncDeleteSegment(Log.scala:1601) at kafka.log.Log.kafka$log$Log$$deleteSegment(Log.scala:1588) at kafka.log.Log$$anonfun$deleteSegments$1$$anonfun$apply$mcI$sp$1.apply(Log.scala:1170) at kafka.log.Log$$anonfun$deleteSegments$1$$anonfun$apply$mcI$sp$1.apply(Log.scala:1170) at scala.collection.mutable.ResizableArray$class.foreach(ResizableArray.scala:59) at scala.collection.mutable.ArrayBuffer.foreach(ArrayBuffer.scala:48) at kafka.log.Log$$anonfun$deleteSegments$1.apply$mcI$sp(Log.scala:1170) at kafka.log.Log$$anonfun$deleteSegments$1.apply(Log.scala:1161) at kafka.log.Log$$anonfun$deleteSegments$1.apply(Log.scala:1161) at kafka.log.Log.maybeHandleIOException(Log.scala:1678) at kafka.log.Log.deleteSegments(Log.scala:1161) at kafka.log.Log.deleteOldSegments(Log.scala:1156) at kafka.log.Log.deleteRetentionMsBreachedSegments(Log.scala:1228) at kafka.log.Log.deleteOldSegments(Log.scala:1222) at kafka.log.LogManager$$anonfun$cleanupLogs$3.apply(LogManager.scala:854) at kafka.log.LogManager$$anonfun$cleanupLogs$3.apply(LogManager.scala:852) at scala.collection.TraversableLike$WithFilter$$anonfun$foreach$1.apply(TraversableLike.scala:733) at scala.collection.immutable.List.foreach(List.scala:392) at scala.collection.TraversableLike$WithFilter.foreach(TraversableLike.scala:732) at kafka.log.LogManager.cleanupLogs(LogManager.scala:852) at kafka.log.LogManager$$anonfun$startup$1.apply$mcV$sp(LogManager.scala:385) at kafka.utils.KafkaScheduler$$anonfun$1.apply$mcV$sp(KafkaScheduler.scala:110) at kafka.utils.CoreUtils$$anon$1.run(CoreUtils.scala:62) at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:511) at java.util.concurrent.FutureTask.runAndReset(FutureTask.java:308) at java.util.concurrent.ScheduledThreadPoolExecutor$ScheduledFutureTask.access$301(ScheduledThreadPoolExecutor.java:180) at java.util.concurrent.ScheduledThreadPoolExecutor$ScheduledFutureTask.run(ScheduledThreadPoolExecutor.java:294) at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149) at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624) at java.lang.Thread.run(Thread.java:748) Suppressed: java.nio.file.FileSystemException: C:\kafka1\test-0\00000000000000000000.log -> C:\kafka1\test-0\00000000000000000000.log.deleted: The process cannot access the file because it is being used by another process. at sun.nio.fs.WindowsException.translateToIOException(WindowsException.java:86) at sun.nio.fs.WindowsException.rethrowAsIOException(WindowsException.java:97) at sun.nio.fs.WindowsFileCopy.move(WindowsFileCopy.java:301) at sun.nio.fs.WindowsFileSystemProvider.move(WindowsFileSystemProvider.java:287) at java.nio.file.Files.move(Files.java:1395) at org.apache.kafka.common.utils.Utils.atomicMoveWithFallback(Utils.java:696) ... 32 more [2018-05-15 04:05:54,707] ERROR Uncaught exception in scheduled task 'kafka-log-retention' (kafka.utils.KafkaScheduler) org.apache.kafka.common.errors.KafkaStorageException: Error while deleting segments for test-0 in dir C:\kafka1 Caused by: java.nio.file.FileSystemException: C:\kafka1\test-0\00000000000000000000.log -> C:\kafka1\test-0\00000000000000000000.log.deleted: The process cannot access the file because it is being used by another process. at sun.nio.fs.WindowsException.translateToIOException(WindowsException.java:86) at sun.nio.fs.WindowsException.rethrowAsIOException(WindowsException.java:97) at sun.nio.fs.WindowsFileCopy.move(WindowsFileCopy.java:387) at sun.nio.fs.WindowsFileSystemProvider.move(WindowsFileSystemProvider.java:287) at java.nio.file.Files.move(Files.java:1395) at org.apache.kafka.common.utils.Utils.atomicMoveWithFallback(Utils.java:706) at org.apache.kafka.common.record.FileRecords.renameTo(FileRecords.java:212) at kafka.log.LogSegment.changeFileSuffixes(LogSegment.scala:416) at kafka.log.Log.kafka$log$Log$$asyncDeleteSegment(Log.scala:1601) at kafka.log.Log.kafka$log$Log$$deleteSegment(Log.scala:1588) at kafka.log.Log$$anonfun$deleteSegments$1$$anonfun$apply$mcI$sp$1.apply(Log.scala:1170) at kafka.log.Log$$anonfun$deleteSegments$1$$anonfun$apply$mcI$sp$1.apply(Log.scala:1170) at scala.collection.mutable.ResizableArray$class.foreach(ResizableArray.scala:59) at scala.collection.mutable.ArrayBuffer.foreach(ArrayBuffer.scala:48) at kafka.log.Log$$anonfun$deleteSegments$1.apply$mcI$sp(Log.scala:1170) at kafka.log.Log$$anonfun$deleteSegments$1.apply(Log.scala:1161) at kafka.log.Log$$anonfun$deleteSegments$1.apply(Log.scala:1161) at kafka.log.Log.maybeHandleIOException(Log.scala:1678) at kafka.log.Log.deleteSegments(Log.scala:1161) at kafka.log.Log.deleteOldSegments(Log.scala:1156) at kafka.log.Log.deleteRetentionMsBreachedSegments(Log.scala:1228) at kafka.log.Log.deleteOldSegments(Log.scala:1222) at kafka.log.LogManager$$anonfun$cleanupLogs$3.apply(LogManager.scala:854) at kafka.log.LogManager$$anonfun$cleanupLogs$3.apply(LogManager.scala:852) at scala.collection.TraversableLike$WithFilter$$anonfun$foreach$1.apply(TraversableLike.scala:733) at scala.collection.immutable.List.foreach(List.scala:392) at scala.collection.TraversableLike$WithFilter.foreach(TraversableLike.scala:732) at kafka.log.LogManager.cleanupLogs(LogManager.scala:852) at kafka.log.LogManager$$anonfun$startup$1.apply$mcV$sp(LogManager.scala:385) at kafka.utils.KafkaScheduler$$anonfun$1.apply$mcV$sp(KafkaScheduler.scala:110) at kafka.utils.CoreUtils$$anon$1.run(CoreUtils.scala:62) at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:511) at java.util.concurrent.FutureTask.runAndReset(FutureTask.java:308) at java.util.concurrent.ScheduledThreadPoolExecutor$ScheduledFutureTask.access$301(ScheduledThreadPoolExecutor.java:180) at java.util.concurrent.ScheduledThreadPoolExecutor$ScheduledFutureTask.run(ScheduledThreadPoolExecutor.java:294) at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149) at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624) at java.lang.Thread.run(Thread.java:748) Suppressed: java.nio.file.FileSystemException: C:\kafka1\test-0\00000000000000000000.log -> C:\kafka1\test-0\00000000000000000000.log.deleted: The process cannot access the file because it is being used by another process. at sun.nio.fs.WindowsException.translateToIOException(WindowsException.java:86) at sun.nio.fs.WindowsException.rethrowAsIOException(WindowsException.java:97) at sun.nio.fs.WindowsFileCopy.move(WindowsFileCopy.java:301) at sun.nio.fs.WindowsFileSystemProvider.move(WindowsFileSystemProvider.java:287) at java.nio.file.Files.move(Files.java:1395) at org.apache.kafka.common.utils.Utils.atomicMoveWithFallback(Utils.java:696) ... 32 more [2018-05-15 04:05:54,708] INFO [ReplicaManager broker=1] Stopping serving replicas in dir C:\kafka1 (kafka.server.ReplicaManager) [2018-05-15 04:05:54,712] INFO [ReplicaFetcherManager on broker 1] Removed fetcher for partitions __consumer_offsets-22,__consumer_offsets-30,__consumer_offsets-8,__consumer_offsets-21,__consumer_offsets-4,__consumer_offsets-27,__consumer_offsets-7,__consumer_offsets-9,__consumer_offsets-46,__consumer_offsets-25,__consumer_offsets-35,__consumer_offsets-41,__consumer_offsets-33,__consumer_offsets-23,__consumer_offsets-49,__consumer_offsets-47,__consumer_offsets-16,test-0,__consumer_offsets-28,__consumer_offsets-31,__consumer_offsets-36,__consumer_offsets-42,__consumer_offsets-3,__consumer_offsets-18,test1-0,__consumer_offsets-37,__consumer_offsets-15,__consumer_offsets-24,__consumer_offsets-38,__consumer_offsets-17,__consumer_offsets-48,__consumer_offsets-19,__consumer_offsets-11,__consumer_offsets-13,__consumer_offsets-2,__consumer_offsets-43,__consumer_offsets-6,__consumer_offsets-14,__consumer_offsets-20,__consumer_offsets-0,__consumer_offsets-44,__consumer_offsets-39,__consumer_offsets-12,__consumer_offsets-45,__consumer_offsets-1,__consumer_offsets-5,__consumer_offsets-26,__consumer_offsets-29,__consumer_offsets-34,__consumer_offsets-10,__consumer_offsets-32,__consumer_offsets-40 (kafka.server.ReplicaFetcherManager) [2018-05-15 04:05:54,714] INFO [ReplicaAlterLogDirsManager on broker 1] Removed fetcher for partitions __consumer_offsets-22,__consumer_offsets-30,__consumer_offsets-8,__consumer_offsets-21,__consumer_offsets-4,__consumer_offsets-27,__consumer_offsets-7,__consumer_offsets-9,__consumer_offsets-46,__consumer_offsets-25,__consumer_offsets-35,__consumer_offsets-41,__consumer_offsets-33,__consumer_offsets-23,__consumer_offsets-49,__consumer_offsets-47,__consumer_offsets-16,test-0,__consumer_offsets-28,__consumer_offsets-31,__consumer_offsets-36,__consumer_offsets-42,__consumer_offsets-3,__consumer_offsets-18,test1-0,__consumer_offsets-37,__consumer_offsets-15,__consumer_offsets-24,__consumer_offsets-38,__consumer_offsets-17,__consumer_offsets-48,__consumer_offsets-19,__consumer_offsets-11,__consumer_offsets-13,__consumer_offsets-2,__consumer_offsets-43,__consumer_offsets-6,__consumer_offsets-14,__consumer_offsets-20,__consumer_offsets-0,__consumer_offsets-44,__consumer_offsets-39,__consumer_offsets-12,__consumer_offsets-45,__consumer_offsets-1,__consumer_offsets-5,__consumer_offsets-26,__consumer_offsets-29,__consumer_offsets-34,__consumer_offsets-10,__consumer_offsets-32,__consumer_offsets-40 (kafka.server.ReplicaAlterLogDirsManager) [2018-05-15 04:05:54,759] INFO [ReplicaManager broker=1] Broker 1 stopped fetcher for partitions __consumer_offsets-22,__consumer_offsets-30,__consumer_offsets-8,__consumer_offsets-21,__consumer_offsets-4,__consumer_offsets-27,__consumer_offsets-7,__consumer_offsets-9,__consumer_offsets-46,__consumer_offsets-25,__consumer_offsets-35,__consumer_offsets-41,__consumer_offsets-33,__consumer_offsets-23,__consumer_offsets-49,__consumer_offsets-47,__consumer_offsets-16,test-0,__consumer_offsets-28,__consumer_offsets-31,__consumer_offsets-36,__consumer_offsets-42,__consumer_offsets-3,__consumer_offsets-18,test1-0,__consumer_offsets-37,__consumer_offsets-15,__consumer_offsets-24,__consumer_offsets-38,__consumer_offsets-17,__consumer_offsets-48,__consumer_offsets-19,__consumer_offsets-11,__consumer_offsets-13,__consumer_offsets-2,__consumer_offsets-43,__consumer_offsets-6,__consumer_offsets-14,__consumer_offsets-20,__consumer_offsets-0,__consumer_offsets-44,__consumer_offsets-39,__consumer_offsets-12,__consumer_offsets-45,__consumer_offsets-1,__consumer_offsets-5,__consumer_offsets-26,__consumer_offsets-29,__consumer_offsets-34,__consumer_offsets-10,__consumer_offsets-32,__consumer_offsets-40 and stopped moving logs for partitions because they are in the failed log directory C:\kafka1. (kafka.server.ReplicaManager) [2018-05-15 04:05:54,760] INFO Stopping serving logs in dir C:\kafka1 (kafka.log.LogManager) [2018-05-15 04:05:54,765] ERROR Shutdown broker because all log dirs in C:\kafka1 have failed (kafka.log.LogManager)