diff --git a/core/src/main/scala/kafka/admin/AdminUtils.scala b/core/src/main/scala/kafka/admin/AdminUtils.scala
index b9ef4dc..7c06330 100644
--- a/core/src/main/scala/kafka/admin/AdminUtils.scala
+++ b/core/src/main/scala/kafka/admin/AdminUtils.scala
@@ -24,6 +24,7 @@ import kafka.utils.{Logging, ZkUtils}
 import org.I0Itec.zkclient.ZkClient
 import org.I0Itec.zkclient.exception.ZkNodeExistsException
 import scala.collection._
+import scala.collection.mutable.ListBuffer
 import scala.collection.mutable
 import kafka.common._
 import scala.Some
@@ -143,19 +144,27 @@ object AdminUtils extends Logging {
   private def getBrokerInfoFromCache(zkClient: ZkClient,
                                      cachedBrokerInfo: scala.collection.mutable.Map[Int, Broker],
                                      brokerIds: Seq[Int]): Seq[Broker] = {
-    brokerIds.map { id =>
+    val failedBrokerIds: ListBuffer[Int] = new ListBuffer()
+    val brokerMetadata = brokerIds.map { id =>
       val optionalBrokerInfo = cachedBrokerInfo.get(id)
       optionalBrokerInfo match {
-        case Some(brokerInfo) => brokerInfo // return broker info from the cache
+        case Some(brokerInfo) => Some(brokerInfo) // return broker info from the cache
         case None => // fetch it from zookeeper
           ZkUtils.getBrokerInfo(zkClient, id) match {
             case Some(brokerInfo) =>
               cachedBrokerInfo += (id -> brokerInfo)
-              brokerInfo
-            case None => throw new BrokerNotAvailableException("Failed to fetch broker info for broker " + id)
+              Some(brokerInfo)
+            case None =>
+              failedBrokerIds += id
+              None
           }
       }
     }
+    if(failedBrokerIds.size > 0)
+      throw new BrokerNotAvailableException("Failed to fetch broker metadata for brokers " + failedBrokerIds.mkString(","))
+    else
+      brokerMetadata.filter(_.isDefined).map(_.get)
+
   }
 
   private def getWrappedIndex(firstReplicaIndex: Int, secondReplicaShift: Int, replicaIndex: Int, nBrokers: Int): Int = {
diff --git a/core/src/main/scala/kafka/producer/BrokerPartitionInfo.scala b/core/src/main/scala/kafka/producer/BrokerPartitionInfo.scala
index 617fc43..8bdf92f 100644
--- a/core/src/main/scala/kafka/producer/BrokerPartitionInfo.scala
+++ b/core/src/main/scala/kafka/producer/BrokerPartitionInfo.scala
@@ -80,12 +80,12 @@ class BrokerPartitionInfo(producerConfig: ProducerConfig,
       if(tmd.errorCode == ErrorMapping.NoError){
         topicPartitionInfo.put(tmd.topic, tmd)
       } else
-        warn("Error while fetching metadata for topic [%s]: [%s]".format(tmd.topic, tmd), ErrorMapping.exceptionFor(tmd.errorCode))
+        warn("Error while fetching metadata [%s] for topic [%s]: %s".format(tmd, tmd.topic, ErrorMapping.exceptionFor(tmd.errorCode).getClass))
       tmd.partitionsMetadata.foreach(pmd =>{
-        if (pmd.errorCode != ErrorMapping.NoError){
-          warn("Error while fetching metadata for topic partition [%s,%d]: [%s]".format(tmd.topic, pmd.partitionId, pmd),
-            ErrorMapping.exceptionFor(pmd.errorCode))
-        }
+        if (pmd.errorCode == ErrorMapping.LeaderNotAvailableCode) {
+          warn("Error while fetching metadata %s for topic partition [%s,%d]: [%s]".format(pmd, tmd.topic, pmd.partitionId,
+            ErrorMapping.exceptionFor(pmd.errorCode).getClass))
+        } // any other error code (e.g. ReplicaNotAvailable) can be ignored since the producer does not need to access the replica and isr metadata
       })
     })
     producerPool.updateProducer(topicsMetadata)
diff --git a/core/src/main/scala/kafka/producer/ProducerPool.scala b/core/src/main/scala/kafka/producer/ProducerPool.scala
index 4970029..43df70b 100644
--- a/core/src/main/scala/kafka/producer/ProducerPool.scala
+++ b/core/src/main/scala/kafka/producer/ProducerPool.scala
@@ -43,9 +43,9 @@ class ProducerPool(val config: ProducerConfig) extends Logging {
   private val syncProducers = new HashMap[Int, SyncProducer]
   private val lock = new Object()
 
-  def updateProducer(topicMetadatas: Seq[TopicMetadata]) {
+  def updateProducer(topicMetadata: Seq[TopicMetadata]) {
     val newBrokers = new collection.mutable.HashSet[Broker]
-    topicMetadatas.foreach(tmd => {
+    topicMetadata.foreach(tmd => {
       tmd.partitionsMetadata.foreach(pmd => {
        if(pmd.leader.isDefined)
          newBrokers+=(pmd.leader.get)
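
Note: the AdminUtils change switches getBrokerInfoFromCache from throwing on the first missing broker to collecting every missing id and failing once with the full list, so the resulting BrokerNotAvailableException names all unavailable brokers at once. Below is a minimal standalone sketch of that aggregate-then-fail pattern; Broker, fetchBroker, and lookupAll are hypothetical stand-ins for illustration, not Kafka's actual types or API.

import scala.collection.mutable.ListBuffer

object BrokerLookupSketch {
  // Hypothetical stand-ins: a trivial Broker record and a lookup that
  // "finds" only even ids, simulating brokers missing from ZooKeeper.
  case class Broker(id: Int)
  def fetchBroker(id: Int): Option[Broker] = if (id % 2 == 0) Some(Broker(id)) else None

  // Map every id to an Option, remembering failures, and only throw after
  // the whole pass -- the same shape as the getBrokerInfoFromCache change.
  def lookupAll(ids: Seq[Int]): Seq[Broker] = {
    val failed = new ListBuffer[Int]()
    val found = ids.map { id =>
      fetchBroker(id) match {
        case some @ Some(_) => some
        case None =>
          failed += id
          None
      }
    }
    if (failed.nonEmpty)
      throw new IllegalStateException("Failed to fetch broker metadata for brokers " + failed.mkString(","))
    found.flatten // safe: nothing failed, so every element is defined
  }

  def main(args: Array[String]): Unit = {
    println(lookupAll(Seq(0, 2, 4))) // List(Broker(0), Broker(2), Broker(4))
    // lookupAll(Seq(0, 1, 3))       // would throw, naming brokers 1 and 3 in one message
  }
}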