jolshan commented on code in PR #15176:
URL: https://github.com/apache/kafka/pull/15176#discussion_r1470096146


##########
core/src/main/scala/kafka/coordinator/group/GroupCoordinator.scala:
##########
@@ -935,8 +935,12 @@ private[group] class GroupCoordinator(
           producerId,
           producerEpoch,
           RecordBatch.NO_SEQUENCE,
-          requestLocal,
-          postVerificationCallback
+          // Wrap the callback to be handled on an arbitrary request handler
+          // thread when transaction verification is complete.

Review Comment:
   Should we leave a comment noting that the `requestLocal` passed in is only
used for the case where we execute immediately?



##########
core/src/main/scala/kafka/server/ReplicaManager.scala:
##########
@@ -982,24 +996,21 @@ class ReplicaManager(val config: KafkaConfig,
     producerId: Long,
     producerEpoch: Short,
     baseSequence: Int,
-    requestLocal: RequestLocal,
-    callback: (Errors, RequestLocal, VerificationGuard) => Unit
+    callback: ((Errors, VerificationGuard)) => Unit
   ): Unit = {
-    def generalizedCallback(preAppendErrors: Map[TopicPartition, Errors],
-                            newRequestLocal: RequestLocal,
-                            verificationGuards: Map[TopicPartition, 
VerificationGuard]): Unit = {
-      callback(
+    def generalizedCallback(results: (Map[TopicPartition, Errors], 
Map[TopicPartition, VerificationGuard])): Unit = {
+      val (preAppendErrors, verificationGuards) = results
+      callback((

Review Comment:
   nit: are the double `((` needed? I see it elsewhere so maybe I'm missing an 
aspect of the language.



##########
core/src/main/scala/kafka/server/ReplicaManager.scala:
##########
@@ -813,19 +816,19 @@ class ReplicaManager(val config: KafkaConfig,
 
     val transactionalProducerInfo = mutable.HashSet[(Long, Short)]()
     val topicPartitionBatchInfo = mutable.Map[TopicPartition, Int]()
-    entriesPerPartition.foreach { case (topicPartition, records) =>
+    entriesPerPartition.forKeyValue { (topicPartition, records) =>
       // Produce requests (only requests that require verification) should 
only have one batch per partition in "batches" but check all just to be safe.
       val transactionalBatches = records.batches.asScala.filter(batch => 
batch.hasProducerId && batch.isTransactional)
       transactionalBatches.foreach(batch => 
transactionalProducerInfo.add(batch.producerId, batch.producerEpoch))
-      if (!transactionalBatches.isEmpty) 
topicPartitionBatchInfo.put(topicPartition, records.firstBatch.baseSequence)
+      if (transactionalBatches.nonEmpty) 
topicPartitionBatchInfo.put(topicPartition, records.firstBatch.baseSequence)
     }
     if (transactionalProducerInfo.size > 1) {
       throw new InvalidPidMappingException("Transactional records contained 
more than one producer ID")
     }
 
-    def postVerificationCallback(preAppendErrors: Map[TopicPartition, Errors],
-                                 newRequestLocal: RequestLocal,
-                                 verificationGuards: Map[TopicPartition, 
VerificationGuard]): Unit = {
+    def postVerificationCallback(newRequestLocal: RequestLocal,
+                                 results: (Map[TopicPartition, Errors], 
Map[TopicPartition, VerificationGuard])): Unit = {

Review Comment:
   I don't have a problem with this refactor, but just curious what makes a 
tuple argument better than individual ones.



##########
core/src/test/scala/unit/kafka/coordinator/group/GroupCoordinatorConcurrencyTest.scala:
##########
@@ -85,6 +85,10 @@ class GroupCoordinatorConcurrencyTest extends 
AbstractCoordinatorConcurrencyTest
     groupCoordinator = GroupCoordinator(config, replicaManager, 
heartbeatPurgatory, rebalancePurgatory, timer.time, metrics)
     groupCoordinator.startup(() => 
zkClient.getTopicPartitionCount(Topic.GROUP_METADATA_TOPIC_NAME).getOrElse(config.offsetsTopicPartitions),
       false)
+
+    // Transactional appends attempt to schedule work onto a request handler
thread
+    // from a non-request-handler thread. Set this to avoid an error.
+    KafkaRequestHandler.setBypassThreadCheck(true)

Review Comment:
   Is this needed because we now do the wrapping outside of the replica manager?



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: jira-unsubscribe@kafka.apache.org

For queries about this service, please contact Infrastructure at:
users@infra.apache.org

Reply via email to