StreamsConfig - StreamsConfig values:
	application.id = fooapp
	application.server = fooapp.uat.svc.cluster.local:443
	bootstrap.servers = [broker01.kafkauat.uat.aws.ad.foo.com:9092, broker03.kafkauat.uat.aws.ad.foo.com:9092, broker05.kafkauat.uat.aws.ad.foo.com:9092]
	buffered.records.per.partition = 1000
	cache.max.bytes.buffering = 10485760
	client.id = 
	commit.interval.ms = 30000
	connections.max.idle.ms = 540000
	default.deserialization.exception.handler = class org.apache.kafka.streams.errors.LogAndFailExceptionHandler
	default.key.serde = class com.foo.kafka.serdes.StringSerde
	default.production.exception.handler = class org.apache.kafka.streams.errors.DefaultProductionExceptionHandler
	default.timestamp.extractor = class org.apache.kafka.streams.processor.LogAndSkipOnInvalidTimestamp
	default.value.serde = class io.confluent.kafka.streams.serdes.avro.SpecificAvroSerde
	max.task.idle.ms = 0
	metadata.max.age.ms = 300000
	metric.reporters = []
	metrics.num.samples = 2
	metrics.recording.level = INFO
	metrics.sample.window.ms = 30000
	num.standby.replicas = 1
	num.stream.threads = 1
	partition.grouper = class org.apache.kafka.streams.processor.DefaultPartitionGrouper
	poll.ms = 100
	processing.guarantee = at_least_once
	receive.buffer.bytes = 32768
	reconnect.backoff.max.ms = 1000
	reconnect.backoff.ms = 50
	replication.factor = 3
	request.timeout.ms = 40000
	retries = 0
	retry.backoff.ms = 100
	rocksdb.config.setter = 
	security.protocol = SSL
	send.buffer.bytes = 131072
	state.cleanup.delay.ms = 300000
	state.dir = /data
	topology.optimization = none
	upgrade.from = null
	windowstore.changelog.additional.retention.ms = 3600000
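For context, a minimal sketch of how a configuration like the one logged above might be assembled in code, assuming a Kafka Streams 2.x application with the Confluent kafka-streams-avro-serde artifact on the classpath. The application id, broker list, state directory, and the custom com.foo.kafka.serdes.StringSerde are taken directly from the dump; the class name and method here are hypothetical, only a representative subset of keys is shown, and the rest fall back to the defaults printed above.

```java
import java.util.Properties;

import org.apache.kafka.clients.CommonClientConfigs;
import org.apache.kafka.streams.StreamsConfig;
import org.apache.kafka.streams.errors.LogAndFailExceptionHandler;
import org.apache.kafka.streams.processor.LogAndSkipOnInvalidTimestamp;

import io.confluent.kafka.streams.serdes.avro.SpecificAvroSerde;

// Hypothetical holder class; builds a Properties object matching a subset
// of the StreamsConfig dump above.
public class FooAppConfig {

    static Properties streamsProperties() {
        Properties props = new Properties();
        props.put(StreamsConfig.APPLICATION_ID_CONFIG, "fooapp");
        props.put(StreamsConfig.APPLICATION_SERVER_CONFIG, "fooapp.uat.svc.cluster.local:443");
        props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG,
                "broker01.kafkauat.uat.aws.ad.foo.com:9092,"
                + "broker03.kafkauat.uat.aws.ad.foo.com:9092,"
                + "broker05.kafkauat.uat.aws.ad.foo.com:9092");
        // Custom key serde from the dump, referenced by name so this sketch
        // compiles without the com.foo.kafka.serdes artifact on the classpath.
        props.put(StreamsConfig.DEFAULT_KEY_SERDE_CLASS_CONFIG, "com.foo.kafka.serdes.StringSerde");
        props.put(StreamsConfig.DEFAULT_VALUE_SERDE_CLASS_CONFIG, SpecificAvroSerde.class);
        props.put(StreamsConfig.DEFAULT_DESERIALIZATION_EXCEPTION_HANDLER_CLASS_CONFIG,
                LogAndFailExceptionHandler.class);
        props.put(StreamsConfig.DEFAULT_TIMESTAMP_EXTRACTOR_CLASS_CONFIG,
                LogAndSkipOnInvalidTimestamp.class);
        props.put(StreamsConfig.PROCESSING_GUARANTEE_CONFIG, StreamsConfig.AT_LEAST_ONCE);
        props.put(StreamsConfig.NUM_STANDBY_REPLICAS_CONFIG, 1);
        props.put(StreamsConfig.NUM_STREAM_THREADS_CONFIG, 1);
        props.put(StreamsConfig.REPLICATION_FACTOR_CONFIG, 3);
        props.put(StreamsConfig.STATE_DIR_CONFIG, "/data");
        // security.protocol is a plain client config that Streams passes
        // through to its internal producer, consumer, and admin clients.
        props.put(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG, "SSL");
        return props;
    }
}
```

These properties would then be handed to the `KafkaStreams` constructor alongside the topology (e.g. `new KafkaStreams(topology, FooAppConfig.streamsProperties())`), at which point Streams logs exactly this kind of `StreamsConfig values:` block on startup.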