artemlivshits commented on code in PR #14612:
URL: https://github.com/apache/kafka/pull/14612#discussion_r1456764730
##########
core/src/main/scala/kafka/server/metadata/KRaftMetadataCache.scala:
##########
@@ -141,17 +144,33 @@ class KRaftMetadataCache(val brokerId: Int) extends MetadataCache with Logging w
     }
   }

+  /**
+   * Return topic partition metadata for the given topic, listener and index range. Also, return a boolean value to
+   * indicate whether there are more partitions with index equal or larger than the upper index.
+   *
+   * @param image        The metadata image
+   * @param topicName    The name of the topic.
+   * @param listenerName The listener name.
+   * @param startIndex   The smallest index of the partitions to be included in the result.
+   * @param upperIndex   The upper limit of the index of the partitions to be included in the result.
+   *                     Note that the upper index can be larger than the largest partition index in
+   *                     this topic.
+   * @return A collection of topic partition metadata and whether there are more partitions.
+   */
   private def getPartitionMetadataForDescribeTopicResponse(
     image: MetadataImage,
     topicName: String,
-    listenerName: ListenerName
-  ): Option[List[DescribeTopicPartitionsResponsePartition]] = {
+    listenerName: ListenerName,
+    startIndex: Int,
+    upperIndex: Int
+  ): (Option[List[DescribeTopicPartitionsResponsePartition]], Boolean) = {
     Option(image.topics().getTopic(topicName)) match {
-      case None => None
+      case None => (None, false)
       case Some(topic) => {
-        val partitions = Some(topic.partitions().entrySet().asScala.map { entry =>
-          val partitionId = entry.getKey
-          val partition = entry.getValue
+        val result = new ListBuffer[DescribeTopicPartitionsResponsePartition]()
+        val endIndex = upperIndex.min(topic.partitions().size())
+        for (partitionId <- startIndex until endIndex) {
+          val partition = topic.partitions().get(partitionId)

Review Comment:
   The data structure leaves open the possibility (due to a bug or a change elsewhere) of arbitrary partition numbers. It would be good not to crash if the current assumptions are violated.
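   For illustration, a minimal sketch of such a defensive check (the null guard and the warn message are assumptions here, not code from the PR; the surrounding names come from the diff above):

   ```scala
   // Sketch only: same loop as in the diff, but tolerating a gap in the partition ids.
   // topic.partitions() maps partition id to PartitionRegistration, so a missing id
   // returns null and would otherwise NPE further down.
   val endIndex = upperIndex.min(topic.partitions().size())
   for (partitionId <- startIndex until endIndex) {
     val partition = topic.partitions().get(partitionId)
     if (partition == null) {
       warn(s"Partition $partitionId of topic $topicName not found in the metadata image; skipping")
     } else {
       // ... build the DescribeTopicPartitionsResponsePartition as in the diff ...
     }
   }
   ```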
##########
core/src/main/scala/kafka/server/metadata/KRaftMetadataCache.scala:
##########
@@ -167,64 +167,59 @@ class KRaftMetadataCache(val brokerId: Int) extends MetadataCache with Logging w
       case None => (None, -1)
       case Some(topic) => {
         val result = new ListBuffer[DescribeTopicPartitionsResponsePartition]()
-        // The partition id may not be consecutive.
-        val partitions = topic.partitions().keySet().stream().sorted().iterator()
-        var remaining = maxCount
-        while (remaining > 0 && partitions.hasNext) {
-          val partitionId = partitions.next()
-          if (partitionId >= startIndex) {
-            remaining -= 1
-            val partition = topic.partitions().get(partitionId)
-            val filteredReplicas = maybeFilterAliveReplicas(image, partition.replicas,
-              listenerName, false)
-            val filteredIsr = maybeFilterAliveReplicas(image, partition.isr, listenerName, false)
-            val offlineReplicas = getOfflineReplicas(image, partition, listenerName)
-            val maybeLeader = getAliveEndpoint(image, partition.leader, listenerName)
-            maybeLeader match {
-              case None =>
-                val error = if (!image.cluster().brokers.containsKey(partition.leader)) {
-                  debug(s"Error while fetching metadata for $topicName-$partitionId: leader not available")
-                  Errors.LEADER_NOT_AVAILABLE
-                } else {
-                  debug(s"Error while fetching metadata for $topicName-$partitionId: listener $listenerName " +
-                    s"not found on leader ${partition.leader}")
-                  Errors.LISTENER_NOT_FOUND
-                }
-                result.addOne(new DescribeTopicPartitionsResponsePartition()
-                  .setErrorCode(error.code)
-                  .setPartitionIndex(partitionId)
-                  .setLeaderId(MetadataResponse.NO_LEADER_ID)
-                  .setLeaderEpoch(partition.leaderEpoch)
-                  .setReplicaNodes(filteredReplicas)
-                  .setIsrNodes(filteredIsr)
-                  .setOfflineReplicas(offlineReplicas))
-              case Some(leader) =>
-                val error = if (filteredReplicas.size < partition.replicas.length) {
-                  debug(s"Error while fetching metadata for $topicName-$partitionId: replica information not available for " +
-                    s"following brokers ${partition.replicas.filterNot(filteredReplicas.contains).mkString(",")}")
-                  Errors.REPLICA_NOT_AVAILABLE
-                } else if (filteredIsr.size < partition.isr.length) {
-                  debug(s"Error while fetching metadata for $topicName-$partitionId: in sync replica information not available for " +
-                    s"following brokers ${partition.isr.filterNot(filteredIsr.contains).mkString(",")}")
-                  Errors.REPLICA_NOT_AVAILABLE
-                } else {
-                  Errors.NONE
-                }
-
-                result.addOne(new DescribeTopicPartitionsResponsePartition()
-                  .setErrorCode(error.code)
-                  .setPartitionIndex(partitionId)
-                  .setLeaderId(leader.id())
-                  .setLeaderEpoch(partition.leaderEpoch)
-                  .setReplicaNodes(filteredReplicas)
-                  .setIsrNodes(filteredIsr)
-                  .setOfflineReplicas(offlineReplicas)
-                  .setEligibleLeaderReplicas(Replicas.toList(partition.elr))
-                  .setLastKnownElr(Replicas.toList(partition.lastKnownElr)))
-            }
+        val partitions = topic.partitions().keySet()

Review Comment:
   Looks like here we just need to remember the size? Or maybe calculate the nextIndex directly here?
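   A sketch of that suggestion (assumes partition ids run consecutively from 0, matching the indexing approach in the other hunk, and that -1 means "no more partitions" as in the (None, -1) case above):

   ```scala
   // Sketch only: derive the page bounds and the next cursor from the partition count,
   // without materializing or iterating the sorted key set.
   val partitionCount = topic.partitions().size()
   val endIndex = math.min(startIndex + maxCount, partitionCount)
   val nextIndex = if (endIndex < partitionCount) endIndex else -1
   ```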
##########
core/src/main/scala/kafka/server/metadata/KRaftMetadataCache.scala:
##########
@@ -140,6 +141,71 @@ class KRaftMetadataCache(val brokerId: Int) extends MetadataCache with Logging w
     }
   }

+  private def getPartitionMetadataForDescribeTopicResponse(
+    image: MetadataImage,
+    topicName: String,
+    listenerName: ListenerName
+  ): Option[List[DescribeTopicPartitionsResponsePartition]] = {
+    Option(image.topics().getTopic(topicName)) match {
+      case None => None
+      case Some(topic) => {
+        val partitions = Some(topic.partitions().entrySet().asScala.map { entry =>
+          val partitionId = entry.getKey
+          val partition = entry.getValue
+          val filteredReplicas = maybeFilterAliveReplicas(image, partition.replicas,
+            listenerName, false)
+          val filteredIsr = maybeFilterAliveReplicas(image, partition.isr, listenerName,
+            false)
+          val offlineReplicas = getOfflineReplicas(image, partition, listenerName)
+          val maybeLeader = getAliveEndpoint(image, partition.leader, listenerName)
+          maybeLeader match {
+            case None =>
+              val error = if (!image.cluster().brokers.containsKey(partition.leader)) {

Review Comment:
   I guess we need to see what the client does with the error code.
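   For context on that question, a hedged sketch of how a caller could inspect the per-partition error code set in this branch (partitionData is a hypothetical variable; Errors.forCode and exception are from org.apache.kafka.common.protocol.Errors):

   ```scala
   import org.apache.kafka.common.errors.RetriableException
   import org.apache.kafka.common.protocol.Errors

   // Sketch only: both LEADER_NOT_AVAILABLE and LISTENER_NOT_FOUND map to exceptions;
   // a client would typically check retriability before refreshing metadata and retrying.
   val error = Errors.forCode(partitionData.errorCode)
   if (error != Errors.NONE) {
     error.exception match {
       case _: RetriableException => // e.g. refresh metadata and retry the request
       case e => throw e            // surface non-retriable errors to the caller
     }
   }
   ```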