[GitHub] [kafka] jolshan commented on a diff in pull request #14402: MINOR: Push logic to resolve the transaction coordinator into the AddPartitionsToTxnManager

2023-09-19 Thread via GitHub


jolshan commented on code in PR #14402:
URL: https://github.com/apache/kafka/pull/14402#discussion_r1330542903


##
core/src/test/scala/unit/kafka/server/ReplicaManagerTest.scala:
##
@@ -69,7 +69,6 @@ import com.yammer.metrics.core.Gauge
 import kafka.log.remote.RemoteLogManager
 import org.apache.kafka.common.config.AbstractConfig
 import org.apache.kafka.common.internals.Topic
-import 
org.apache.kafka.common.message.AddPartitionsToTxnRequestData.{AddPartitionsToTxnTopic,
 AddPartitionsToTxnTopicCollection, AddPartitionsToTxnTransaction}

Review Comment:
   For our `setUpReplicaManagerWithMockedAddPartitionsToTxnManager` we can 
remove the lines of code that mock the flow for getting transaction state 
partitions.
   
   val metadataResponseTopic = Seq(new MetadataResponseTopic()
 .setName(Topic.TRANSACTION_STATE_TOPIC_NAME)
 .setPartitions(Seq(
   new MetadataResponsePartition()
 .setPartitionIndex(0)
 .setLeaderId(0)).asJava))
   transactionalTopicPartitions.foreach(tp => 
when(metadataCache.contains(tp)).thenReturn(true))
   
when(metadataCache.getTopicMetadata(Set(Topic.TRANSACTION_STATE_TOPIC_NAME), 
config.interBrokerListenerName)).thenReturn(metadataResponseTopic)
   when(metadataCache.getAliveBrokerNode(0, 
config.interBrokerListenerName)).thenReturn(Some(node))
   when(metadataCache.getAliveBrokerNode(1, 
config.interBrokerListenerName)).thenReturn(None)
   
I think the same code exists for `setupReplicaManagerWithMockedPurgatories` 
  



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: jira-unsubscr...@kafka.apache.org

For queries about this service, please contact Infrastructure at:
us...@infra.apache.org



[GitHub] [kafka] jolshan commented on a diff in pull request #14402: MINOR: Push logic to resolve the transaction coordinator into the AddPartitionsToTxnManager

2023-09-19 Thread via GitHub


jolshan commented on code in PR #14402:
URL: https://github.com/apache/kafka/pull/14402#discussion_r1330524344


##
core/src/test/scala/unit/kafka/server/AddPartitionsToTxnManagerTest.scala:
##
@@ -157,29 +216,114 @@ class AddPartitionsToTxnManagerTest {
 // The request for node1 should not be added because one request is 
already inflight.
 assertEquals(1, requestsAndHandlers2.size)
 requestsAndHandlers2.foreach { requestAndHandler =>
-  verifyRequest(node2, transactionalId3, producerId3, requestAndHandler)
+  verifyRequest(node2, transactionalId3, producerId3, requestAndHandler, 
verifyOnly = false)
 }
 
 // Complete the request for node1 so the new one can go through.
 requestsAndHandlers.filter(_.destination == 
node1).head.handler.onComplete(authenticationErrorResponse)
 val requestsAndHandlers3 = 
addPartitionsToTxnManager.generateRequests().asScala
 assertEquals(1, requestsAndHandlers3.size)
 requestsAndHandlers3.foreach { requestAndHandler =>
-  verifyRequest(node1, transactionalId2, producerId2, requestAndHandler)
+  verifyRequest(node1, transactionalId2, producerId2, requestAndHandler, 
verifyOnly = true)
+}
+  }
+
+  @Test
+  def testTransactionCoordinatorResolution(): Unit = {
+when(partitionFor.apply(transactionalId1)).thenReturn(0)
+
+def checkError(): Unit = {
+  val errors = mutable.Map[TopicPartition, Errors]()
+
+  addPartitionsToTxnManager.addTxnData(
+transactionalId1,
+producerId1,
+producerEpoch = 0,
+verifyOnly = true,
+topicPartitions,
+setErrors(errors)
+  )
+
+  assertEquals(topicPartitions.map(tp => tp -> 
Errors.COORDINATOR_NOT_AVAILABLE).toMap, errors)
 }
+
+// The transaction state topic does not exist.
+
when(metadataCache.getTopicMetadata(Set(Topic.TRANSACTION_STATE_TOPIC_NAME), 
config.interBrokerListenerName))
+  .thenReturn(Seq())
+checkError()
+
+// The metadata of the transaction state topic returns an error.
+
when(metadataCache.getTopicMetadata(Set(Topic.TRANSACTION_STATE_TOPIC_NAME), 
config.interBrokerListenerName))
+  .thenReturn(Seq(
+new MetadataResponseData.MetadataResponseTopic()
+  .setName(Topic.TRANSACTION_STATE_TOPIC_NAME)
+  .setErrorCode(Errors.BROKER_NOT_AVAILABLE.code)
+  ))
+checkError()
+
+// The partition does not exist.
+
when(metadataCache.getTopicMetadata(Set(Topic.TRANSACTION_STATE_TOPIC_NAME), 
config.interBrokerListenerName))
+  .thenReturn(Seq(
+new MetadataResponseData.MetadataResponseTopic()
+  .setName(Topic.TRANSACTION_STATE_TOPIC_NAME)
+  ))
+checkError()
+
+// The partition has not leader.

Review Comment:
   nit: The partition has no leader



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: jira-unsubscr...@kafka.apache.org

For queries about this service, please contact Infrastructure at:
us...@infra.apache.org



[GitHub] [kafka] jolshan commented on a diff in pull request #14402: MINOR: Push logic to resolve the transaction coordinator into the AddPartitionsToTxnManager

2023-09-19 Thread via GitHub


jolshan commented on code in PR #14402:
URL: https://github.com/apache/kafka/pull/14402#discussion_r1330523078


##
core/src/test/scala/unit/kafka/server/AddPartitionsToTxnManagerTest.scala:
##
@@ -68,87 +71,143 @@ class AddPartitionsToTxnManagerTest {
   private val versionMismatchResponse = clientResponse(null, mismatchException 
= new UnsupportedVersionException(""))
   private val disconnectedResponse = clientResponse(null, disconnected = true)
 
+  private val config = KafkaConfig.fromProps(TestUtils.createBrokerConfig(1, 
"localhost:2181"))
+
   @BeforeEach
   def setup(): Unit = {
 addPartitionsToTxnManager = new AddPartitionsToTxnManager(
-  KafkaConfig.fromProps(TestUtils.createBrokerConfig(1, "localhost:2181")),
+  config,
   networkClient,
-  time)
+  metadataCache,
+  partitionFor,
+  time
+)
   }
 
   @AfterEach
   def teardown(): Unit = {
 addPartitionsToTxnManager.shutdown()
   }
 
-  def setErrors(errors: mutable.Map[TopicPartition, Errors])(callbackErrors: 
Map[TopicPartition, Errors]): Unit = {
-callbackErrors.foreach {
-  case (tp, error) => errors.put(tp, error)
-}
+  private def setErrors(errors: mutable.Map[TopicPartition, 
Errors])(callbackErrors: Map[TopicPartition, Errors]): Unit = {
+callbackErrors.forKeyValue(errors.put)
   }
 
   @Test
   def testAddTxnData(): Unit = {
+when(partitionFor.apply(transactionalId1)).thenReturn(0)
+when(partitionFor.apply(transactionalId2)).thenReturn(1)
+when(partitionFor.apply(transactionalId3)).thenReturn(0)
+
when(metadataCache.getTopicMetadata(Set(Topic.TRANSACTION_STATE_TOPIC_NAME), 
config.interBrokerListenerName))
+  .thenReturn(Seq(
+new MetadataResponseData.MetadataResponseTopic()
+  .setName(Topic.TRANSACTION_STATE_TOPIC_NAME)
+  .setPartitions(List(
+new MetadataResponseData.MetadataResponsePartition()
+  .setPartitionIndex(0)
+  .setLeaderId(0),
+new MetadataResponseData.MetadataResponsePartition()
+  .setPartitionIndex(1)
+  .setLeaderId(1)
+  ).asJava)
+  ))
+when(metadataCache.getAliveBrokerNode(0, config.interBrokerListenerName))
+  .thenReturn(Some(node0))
+when(metadataCache.getAliveBrokerNode(1, config.interBrokerListenerName))
+  .thenReturn(Some(node1))
+
 val transaction1Errors = mutable.Map[TopicPartition, Errors]()
 val transaction2Errors = mutable.Map[TopicPartition, Errors]()
 val transaction3Errors = mutable.Map[TopicPartition, Errors]()
 
-addPartitionsToTxnManager.addTxnData(node0, 
transactionData(transactionalId1, producerId1), setErrors(transaction1Errors))
-addPartitionsToTxnManager.addTxnData(node1, 
transactionData(transactionalId2, producerId2), setErrors(transaction2Errors))
-addPartitionsToTxnManager.addTxnData(node0, 
transactionData(transactionalId3, producerId3), setErrors(transaction3Errors))
+addPartitionsToTxnManager.addTxnData(transactionalId1, producerId1, 
producerEpoch = 0, verifyOnly = true, topicPartitions, 
setErrors(transaction1Errors))
+addPartitionsToTxnManager.addTxnData(transactionalId2, producerId2, 
producerEpoch = 0, verifyOnly = true, topicPartitions, 
setErrors(transaction2Errors))
+addPartitionsToTxnManager.addTxnData(transactionalId3, producerId3, 
producerEpoch = 0, verifyOnly = true, topicPartitions, 
setErrors(transaction3Errors))
 
 // We will try to add transaction1 3 more times (retries). One will have 
the same epoch, one will have a newer epoch, and one will have an older epoch 
than the new one we just added.
 val transaction1RetryWithSameEpochErrors = mutable.Map[TopicPartition, 
Errors]()
 val transaction1RetryWithNewerEpochErrors = mutable.Map[TopicPartition, 
Errors]()
 val transaction1RetryWithOldEpochErrors = mutable.Map[TopicPartition, 
Errors]()
 
 // Trying to add more transactional data for the same transactional ID, 
producer ID, and epoch should simply replace the old data and send a retriable 
response.
-addPartitionsToTxnManager.addTxnData(node0, 
transactionData(transactionalId1, producerId1), 
setErrors(transaction1RetryWithSameEpochErrors))
+addPartitionsToTxnManager.addTxnData(transactionalId1, producerId1, 
producerEpoch = 0, verifyOnly = true, topicPartitions, 
setErrors(transaction1RetryWithSameEpochErrors))
 val expectedNetworkErrors = topicPartitions.map(_ -> 
Errors.NETWORK_EXCEPTION).toMap
 assertEquals(expectedNetworkErrors, transaction1Errors)
 
 // Trying to add more transactional data for the same transactional ID and 
producer ID, but new epoch should replace the old data and send an error 
response for it.
-addPartitionsToTxnManager.addTxnData(node0, 
transactionData(transactionalId1, producerId1, producerEpoch = 1), 
setErrors(transaction1RetryWithNewerEpochErrors))
+addPartitionsToTxnManager.addTxnData(transactionalId1, producerId1, 
producerEpoch = 

[GitHub] [kafka] jolshan commented on a diff in pull request #14402: MINOR: Push logic to resolve the transaction coordinator into the AddPartitionsToTxnManager

2023-09-19 Thread via GitHub


jolshan commented on code in PR #14402:
URL: https://github.com/apache/kafka/pull/14402#discussion_r1330522384


##
core/src/main/scala/kafka/server/AddPartitionsToTxnManager.scala:
##
@@ -63,7 +73,42 @@ class AddPartitionsToTxnManager(config: KafkaConfig, client: 
NetworkClient, time
   val verificationFailureRate = 
metricsGroup.newMeter(VerificationFailureRateMetricName, "failures", 
TimeUnit.SECONDS)
   val verificationTimeMs = 
metricsGroup.newHistogram(VerificationTimeMsMetricName)
 
-  def addTxnData(node: Node, transactionData: AddPartitionsToTxnTransaction, 
callback: AddPartitionsToTxnManager.AppendCallback): Unit = {
+  def addTxnData(
+transactionalId: String,
+producerId: Long,
+producerEpoch: Short,
+verifyOnly: Boolean,

Review Comment:
   We will add this functionality later, but AddPartitionsToTxnManager 
currently only supports verifyOnly. In part 2, we will send 
AddPartitionsToTxnRequests that actually add the partition.
   
   I suppose this is ok since replicaManager hard codes true for now.



##
core/src/main/scala/kafka/server/AddPartitionsToTxnManager.scala:
##
@@ -63,7 +73,42 @@ class AddPartitionsToTxnManager(config: KafkaConfig, client: 
NetworkClient, time
   val verificationFailureRate = 
metricsGroup.newMeter(VerificationFailureRateMetricName, "failures", 
TimeUnit.SECONDS)
   val verificationTimeMs = 
metricsGroup.newHistogram(VerificationTimeMsMetricName)
 
-  def addTxnData(node: Node, transactionData: AddPartitionsToTxnTransaction, 
callback: AddPartitionsToTxnManager.AppendCallback): Unit = {
+  def addTxnData(
+transactionalId: String,
+producerId: Long,
+producerEpoch: Short,
+verifyOnly: Boolean,

Review Comment:
   We will add this functionality later, but AddPartitionsToTxnManager 
currently only supports verifyOnly=true. In part 2, we will send 
AddPartitionsToTxnRequests that actually add the partition.
   
   I suppose this is ok since replicaManager hard codes true for now.



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: jira-unsubscr...@kafka.apache.org

For queries about this service, please contact Infrastructure at:
us...@infra.apache.org



[GitHub] [kafka] jolshan commented on a diff in pull request #14402: MINOR: Push logic to resolve the transaction coordinator into the AddPartitionsToTxnManager

2023-09-19 Thread via GitHub


jolshan commented on code in PR #14402:
URL: https://github.com/apache/kafka/pull/14402#discussion_r1330519350


##
core/src/test/scala/unit/kafka/server/AddPartitionsToTxnManagerTest.scala:
##
@@ -68,87 +71,143 @@ class AddPartitionsToTxnManagerTest {
   private val versionMismatchResponse = clientResponse(null, mismatchException 
= new UnsupportedVersionException(""))
   private val disconnectedResponse = clientResponse(null, disconnected = true)
 
+  private val config = KafkaConfig.fromProps(TestUtils.createBrokerConfig(1, 
"localhost:2181"))
+
   @BeforeEach
   def setup(): Unit = {
 addPartitionsToTxnManager = new AddPartitionsToTxnManager(
-  KafkaConfig.fromProps(TestUtils.createBrokerConfig(1, "localhost:2181")),
+  config,
   networkClient,
-  time)
+  metadataCache,
+  partitionFor,
+  time
+)
   }
 
   @AfterEach
   def teardown(): Unit = {
 addPartitionsToTxnManager.shutdown()
   }
 
-  def setErrors(errors: mutable.Map[TopicPartition, Errors])(callbackErrors: 
Map[TopicPartition, Errors]): Unit = {
-callbackErrors.foreach {
-  case (tp, error) => errors.put(tp, error)
-}
+  private def setErrors(errors: mutable.Map[TopicPartition, 
Errors])(callbackErrors: Map[TopicPartition, Errors]): Unit = {
+callbackErrors.forKeyValue(errors.put)
   }
 
   @Test
   def testAddTxnData(): Unit = {
+when(partitionFor.apply(transactionalId1)).thenReturn(0)

Review Comment:
   I wonder if there is a way to make a helper where you pass in id + 
partition. But that might not be worth it for 2 tests.



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: jira-unsubscr...@kafka.apache.org

For queries about this service, please contact Infrastructure at:
us...@infra.apache.org



[GitHub] [kafka] jolshan commented on a diff in pull request #14402: MINOR: Push logic to resolve the transaction coordinator into the AddPartitionsToTxnManager

2023-09-19 Thread via GitHub


jolshan commented on code in PR #14402:
URL: https://github.com/apache/kafka/pull/14402#discussion_r1330516886


##
core/src/test/scala/unit/kafka/server/AddPartitionsToTxnManagerTest.scala:
##
@@ -68,87 +71,143 @@ class AddPartitionsToTxnManagerTest {
   private val versionMismatchResponse = clientResponse(null, mismatchException 
= new UnsupportedVersionException(""))
   private val disconnectedResponse = clientResponse(null, disconnected = true)
 
+  private val config = KafkaConfig.fromProps(TestUtils.createBrokerConfig(1, 
"localhost:2181"))
+
   @BeforeEach
   def setup(): Unit = {
 addPartitionsToTxnManager = new AddPartitionsToTxnManager(
-  KafkaConfig.fromProps(TestUtils.createBrokerConfig(1, "localhost:2181")),
+  config,
   networkClient,
-  time)
+  metadataCache,
+  partitionFor,
+  time
+)
   }
 
   @AfterEach
   def teardown(): Unit = {
 addPartitionsToTxnManager.shutdown()
   }
 
-  def setErrors(errors: mutable.Map[TopicPartition, Errors])(callbackErrors: 
Map[TopicPartition, Errors]): Unit = {
-callbackErrors.foreach {
-  case (tp, error) => errors.put(tp, error)
-}
+  private def setErrors(errors: mutable.Map[TopicPartition, 
Errors])(callbackErrors: Map[TopicPartition, Errors]): Unit = {
+callbackErrors.forKeyValue(errors.put)
   }
 
   @Test
   def testAddTxnData(): Unit = {
+when(partitionFor.apply(transactionalId1)).thenReturn(0)
+when(partitionFor.apply(transactionalId2)).thenReturn(1)
+when(partitionFor.apply(transactionalId3)).thenReturn(0)
+
when(metadataCache.getTopicMetadata(Set(Topic.TRANSACTION_STATE_TOPIC_NAME), 
config.interBrokerListenerName))
+  .thenReturn(Seq(
+new MetadataResponseData.MetadataResponseTopic()
+  .setName(Topic.TRANSACTION_STATE_TOPIC_NAME)
+  .setPartitions(List(
+new MetadataResponseData.MetadataResponsePartition()
+  .setPartitionIndex(0)
+  .setLeaderId(0),
+new MetadataResponseData.MetadataResponsePartition()
+  .setPartitionIndex(1)
+  .setLeaderId(1)
+  ).asJava)
+  ))
+when(metadataCache.getAliveBrokerNode(0, config.interBrokerListenerName))
+  .thenReturn(Some(node0))
+when(metadataCache.getAliveBrokerNode(1, config.interBrokerListenerName))
+  .thenReturn(Some(node1))
+
 val transaction1Errors = mutable.Map[TopicPartition, Errors]()
 val transaction2Errors = mutable.Map[TopicPartition, Errors]()
 val transaction3Errors = mutable.Map[TopicPartition, Errors]()
 
-addPartitionsToTxnManager.addTxnData(node0, 
transactionData(transactionalId1, producerId1), setErrors(transaction1Errors))
-addPartitionsToTxnManager.addTxnData(node1, 
transactionData(transactionalId2, producerId2), setErrors(transaction2Errors))
-addPartitionsToTxnManager.addTxnData(node0, 
transactionData(transactionalId3, producerId3), setErrors(transaction3Errors))
+addPartitionsToTxnManager.addTxnData(transactionalId1, producerId1, 
producerEpoch = 0, verifyOnly = true, topicPartitions, 
setErrors(transaction1Errors))

Review Comment:
   Did we specify the param name here for readability?



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: jira-unsubscr...@kafka.apache.org

For queries about this service, please contact Infrastructure at:
us...@infra.apache.org