dajac commented on code in PR #12501:
URL: https://github.com/apache/kafka/pull/12501#discussion_r950107432


##########
core/src/test/scala/integration/kafka/api/ProducerIdExpirationTest.scala:
##########
@@ -0,0 +1,176 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package kafka.api
+
+import java.util.{Collections, Properties}
+
+import kafka.integration.KafkaServerTestHarness
+import kafka.server.KafkaConfig
+import kafka.utils.{TestInfoUtils, TestUtils}
+import kafka.utils.TestUtils.{consumeRecords, createAdminClient}
+import org.apache.kafka.clients.admin.{Admin, ProducerState}
+import org.apache.kafka.clients.consumer.KafkaConsumer
+import org.apache.kafka.clients.producer.{KafkaProducer, ProducerRecord}
+import org.apache.kafka.common.TopicPartition
+import org.apache.kafka.common.errors.{InvalidPidMappingException, TransactionalIdNotFoundException}
+import org.junit.jupiter.api.Assertions.assertEquals
+import org.junit.jupiter.api.{AfterEach, BeforeEach, TestInfo}
+import org.junit.jupiter.params.ParameterizedTest
+import org.junit.jupiter.params.provider.ValueSource
+
+import scala.jdk.CollectionConverters._
+import scala.collection.Seq
+
+// Test class that tests producer ID expiration
+class ProducerIdExpirationTest extends KafkaServerTestHarness {
+  val topic1 = "topic1"
+  val numPartitions = 4
+  val replicationFactor = 3
+  val tp0 = new TopicPartition(topic1, 0)
+
+  var producer: KafkaProducer[Array[Byte], Array[Byte]] = _
+  var consumer: KafkaConsumer[Array[Byte], Array[Byte]] = _
+  var admin: Admin = _
+
+  override def generateConfigs: Seq[KafkaConfig] = {
+    TestUtils.createBrokerConfigs(3, zkConnectOrNull).map(KafkaConfig.fromProps(_, serverProps()))
+  }
+
+  @BeforeEach
+  override def setUp(testInfo: TestInfo): Unit = {
+    super.setUp(testInfo)
+    consumer = TestUtils.createConsumer(bootstrapServers(),
+      enableAutoCommit = false,
+      readCommitted = true)
+    admin = createAdminClient(brokers, listenerName)
+
+    createTopic(topic1, numPartitions, 3)
+  }
+
+  @AfterEach
+  override def tearDown(): Unit = {
+    if (producer != null)
+      producer.close()
+    consumer.close()
+    admin.close()
+
+    super.tearDown()
+  }
+
+  @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumName)
+  @ValueSource(strings = Array("zk", "kraft"))
+  def testProducerIdExpirationWithNoTransactions(quorum: String): Unit = {
+    producer = TestUtils.createProducer(bootstrapServers(), enableIdempotence = true)
+
+    // Send records to populate producer state cache.
+    producer.send(new ProducerRecord(topic1, 0, null, "key".getBytes, "value".getBytes))
+    producer.flush()
+
+    // Ensure producer IDs are added.
+    ensureConsistentKRaftMetadata()
+    assertEquals(1, producerState.size)
+
+    // Wait for the producer ID to expire

Review Comment:
   nit: add a `.` at the end. There are a few other cases in this class.



##########
core/src/test/scala/integration/kafka/api/ProducerIdExpirationTest.scala:
##########
@@ -0,0 +1,176 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package kafka.api
+
+import java.util.{Collections, Properties}
+
+import kafka.integration.KafkaServerTestHarness
+import kafka.server.KafkaConfig
+import kafka.utils.{TestInfoUtils, TestUtils}
+import kafka.utils.TestUtils.{consumeRecords, createAdminClient}
+import org.apache.kafka.clients.admin.{Admin, ProducerState}
+import org.apache.kafka.clients.consumer.KafkaConsumer
+import org.apache.kafka.clients.producer.{KafkaProducer, ProducerRecord}
+import org.apache.kafka.common.TopicPartition
+import org.apache.kafka.common.errors.{InvalidPidMappingException, TransactionalIdNotFoundException}
+import org.junit.jupiter.api.Assertions.assertEquals
+import org.junit.jupiter.api.{AfterEach, BeforeEach, TestInfo}
+import org.junit.jupiter.params.ParameterizedTest
+import org.junit.jupiter.params.provider.ValueSource
+
+import scala.jdk.CollectionConverters._
+import scala.collection.Seq
+
+// Test class that tests producer ID expiration

Review Comment:
   nit: We can remove this comment. The class name is self-explanatory in my 
opinion.



##########
core/src/test/scala/integration/kafka/api/ProducerIdExpirationTest.scala:
##########
@@ -0,0 +1,176 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package kafka.api
+
+import java.util.{Collections, Properties}
+
+import kafka.integration.KafkaServerTestHarness
+import kafka.server.KafkaConfig
+import kafka.utils.{TestInfoUtils, TestUtils}
+import kafka.utils.TestUtils.{consumeRecords, createAdminClient}
+import org.apache.kafka.clients.admin.{Admin, ProducerState}
+import org.apache.kafka.clients.consumer.KafkaConsumer
+import org.apache.kafka.clients.producer.{KafkaProducer, ProducerRecord}
+import org.apache.kafka.common.TopicPartition
+import org.apache.kafka.common.errors.{InvalidPidMappingException, TransactionalIdNotFoundException}
+import org.junit.jupiter.api.Assertions.assertEquals
+import org.junit.jupiter.api.{AfterEach, BeforeEach, TestInfo}
+import org.junit.jupiter.params.ParameterizedTest
+import org.junit.jupiter.params.provider.ValueSource
+
+import scala.jdk.CollectionConverters._
+import scala.collection.Seq
+
+// Test class that tests producer ID expiration
+class ProducerIdExpirationTest extends KafkaServerTestHarness {
+  val topic1 = "topic1"
+  val numPartitions = 4
+  val replicationFactor = 3
+  val tp0 = new TopicPartition(topic1, 0)
+
+  var producer: KafkaProducer[Array[Byte], Array[Byte]] = _
+  var consumer: KafkaConsumer[Array[Byte], Array[Byte]] = _
+  var admin: Admin = _
+
+  override def generateConfigs: Seq[KafkaConfig] = {
+    TestUtils.createBrokerConfigs(3, zkConnectOrNull).map(KafkaConfig.fromProps(_, serverProps()))
+  }
+
+  @BeforeEach
+  override def setUp(testInfo: TestInfo): Unit = {
+    super.setUp(testInfo)
+    consumer = TestUtils.createConsumer(bootstrapServers(),
+      enableAutoCommit = false,
+      readCommitted = true)
+    admin = createAdminClient(brokers, listenerName)
+
+    createTopic(topic1, numPartitions, 3)
+  }
+
+  @AfterEach
+  override def tearDown(): Unit = {
+    if (producer != null)
+      producer.close()
+    consumer.close()
+    admin.close()
+
+    super.tearDown()
+  }
+
+  @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumName)
+  @ValueSource(strings = Array("zk", "kraft"))
+  def testProducerIdExpirationWithNoTransactions(quorum: String): Unit = {
+    producer = TestUtils.createProducer(bootstrapServers(), enableIdempotence = true)
+
+    // Send records to populate producer state cache.
+    producer.send(new ProducerRecord(topic1, 0, null, "key".getBytes, "value".getBytes))
+    producer.flush()
+
+    // Ensure producer IDs are added.
+    ensureConsistentKRaftMetadata()
+    assertEquals(1, producerState.size)
+
+    // Wait for the producer ID to expire
+    TestUtils.waitUntilTrue(() => producerState.isEmpty, "Producer ID did not expire.")
+
+    // Send more records to send producer ID back to brokers.
+    producer.send(new ProducerRecord(topic1, 0, null, "key".getBytes, "value".getBytes))
+    producer.flush()
+
+    // Producer IDs should repopulate.
+    assertEquals(1, producerState.size)
+  }
+
+  @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumName)
+  @ValueSource(strings = Array("zk", "kraft"))
+  def testTransactionAfterTransactionIdExpiresButProducerIdRemains(quorum: String): Unit = {
+    producer = TestUtils.createTransactionalProducer("transactionalProducer", brokers)
+    producer.initTransactions()
+
+    // Start and then abort a transaction to allow the producer ID to expire
+    producer.beginTransaction()
+    producer.send(TestUtils.producerRecordWithExpectedTransactionStatus(topic1, 0, "2", "2", willBeCommitted = false))
+    producer.flush()
+
+    // Ensure producer IDs are added.
+    assertEquals(1, producerState.size)
+
+    producer.abortTransaction()
+
+    // Wait for the transactional ID to expire
+    Thread.sleep(3000)

Review Comment:
   Could we actually get rid of this one and poll `describeTransactions` to 
wait until the transactional ID is expired? 
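   Something along these lines could replace the sleep (a rough sketch only; the 
   message wording is illustrative and it assumes the transactional ID expiration 
   is configured low enough in `serverProps()`):
   ```scala
   import java.util.concurrent.ExecutionException

   TestUtils.waitUntilTrue(() => {
     val description = admin
       .describeTransactions(Collections.singletonList("transactionalProducer"))
       .description("transactionalProducer")
     try {
       description.get()
       false
     } catch {
       // An expired transactional ID surfaces as TransactionalIdNotFoundException.
       case e: ExecutionException => e.getCause.isInstanceOf[TransactionalIdNotFoundException]
     }
   }, "Transactional ID was not expired by the coordinator.")
   ```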



##########
core/src/test/scala/integration/kafka/api/ProducerIdExpirationTest.scala:
##########
@@ -0,0 +1,176 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package kafka.api
+
+import java.util.{Collections, Properties}
+
+import kafka.integration.KafkaServerTestHarness
+import kafka.server.KafkaConfig
+import kafka.utils.{TestInfoUtils, TestUtils}
+import kafka.utils.TestUtils.{consumeRecords, createAdminClient}
+import org.apache.kafka.clients.admin.{Admin, ProducerState}
+import org.apache.kafka.clients.consumer.KafkaConsumer
+import org.apache.kafka.clients.producer.{KafkaProducer, ProducerRecord}
+import org.apache.kafka.common.TopicPartition
+import org.apache.kafka.common.errors.{InvalidPidMappingException, TransactionalIdNotFoundException}
+import org.junit.jupiter.api.Assertions.assertEquals
+import org.junit.jupiter.api.{AfterEach, BeforeEach, TestInfo}
+import org.junit.jupiter.params.ParameterizedTest
+import org.junit.jupiter.params.provider.ValueSource
+
+import scala.jdk.CollectionConverters._
+import scala.collection.Seq
+
+// Test class that tests producer ID expiration
+class ProducerIdExpirationTest extends KafkaServerTestHarness {
+  val topic1 = "topic1"
+  val numPartitions = 4
+  val replicationFactor = 3
+  val tp0 = new TopicPartition(topic1, 0)
+
+  var producer: KafkaProducer[Array[Byte], Array[Byte]] = _
+  var consumer: KafkaConsumer[Array[Byte], Array[Byte]] = _
+  var admin: Admin = _
+
+  override def generateConfigs: Seq[KafkaConfig] = {
+    TestUtils.createBrokerConfigs(3, zkConnectOrNull).map(KafkaConfig.fromProps(_, serverProps()))
+  }
+
+  @BeforeEach
+  override def setUp(testInfo: TestInfo): Unit = {
+    super.setUp(testInfo)
+    consumer = TestUtils.createConsumer(bootstrapServers(),
+      enableAutoCommit = false,
+      readCommitted = true)
+    admin = createAdminClient(brokers, listenerName)
+
+    createTopic(topic1, numPartitions, 3)
+  }
+
+  @AfterEach
+  override def tearDown(): Unit = {
+    if (producer != null)
+      producer.close()
+    consumer.close()
+    admin.close()
+
+    super.tearDown()
+  }
+
+  @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumName)
+  @ValueSource(strings = Array("zk", "kraft"))
+  def testProducerIdExpirationWithNoTransactions(quorum: String): Unit = {
+    producer = TestUtils.createProducer(bootstrapServers(), enableIdempotence = true)
+
+    // Send records to populate producer state cache.
+    producer.send(new ProducerRecord(topic1, 0, null, "key".getBytes, "value".getBytes))
+    producer.flush()
+
+    // Ensure producer IDs are added.
+    ensureConsistentKRaftMetadata()
+    assertEquals(1, producerState.size)
+
+    // Wait for the producer ID to expire
+    TestUtils.waitUntilTrue(() => producerState.isEmpty, "Producer ID did not expire.")
+
+    // Send more records to send producer ID back to brokers.
+    producer.send(new ProducerRecord(topic1, 0, null, "key".getBytes, "value".getBytes))
+    producer.flush()
+
+    // Producer IDs should repopulate.
+    assertEquals(1, producerState.size)
+  }
+
+  @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumName)
+  @ValueSource(strings = Array("zk", "kraft"))
+  def testTransactionAfterTransactionIdExpiresButProducerIdRemains(quorum: String): Unit = {
+    producer = TestUtils.createTransactionalProducer("transactionalProducer", brokers)
+    producer.initTransactions()
+
+    // Start and then abort a transaction to allow the producer ID to expire
+    producer.beginTransaction()
+    producer.send(TestUtils.producerRecordWithExpectedTransactionStatus(topic1, 0, "2", "2", willBeCommitted = false))
+    producer.flush()
+
+    // Ensure producer IDs are added.
+    assertEquals(1, producerState.size)
+
+    producer.abortTransaction()
+
+    // Wait for the transactional ID to expire
+    Thread.sleep(3000)
+
+    // Confirm the transactional IDs expired
+    val txnDescribeResult = admin.describeTransactions(Collections.singletonList("transactionalProducer")).description("transactionalProducer")
+    org.apache.kafka.test.TestUtils.assertFutureThrows(txnDescribeResult, classOf[TransactionalIdNotFoundException])
+
+    // Producer IDs should be retained.
+    assertEquals(1, producerState.size)
+
+    // Start a new transaction and attempt to send, which will trigger an AddPartitionsToTxnRequest, which will fail due to the expired transactional ID
+    producer.beginTransaction()
+    val failedFuture = producer.send(TestUtils.producerRecordWithExpectedTransactionStatus(topic1, 3, "1", "1", willBeCommitted = false))
+    TestUtils.waitUntilTrue(() => failedFuture.isDone, "Producer future never completed.")
+
+    org.apache.kafka.test.TestUtils.assertFutureThrows(failedFuture, classOf[InvalidPidMappingException])
+    producer.abortTransaction()
+
+    producer.beginTransaction()
+    producer.send(TestUtils.producerRecordWithExpectedTransactionStatus(topic1, 2, "4", "4", willBeCommitted = true))
+    producer.send(TestUtils.producerRecordWithExpectedTransactionStatus(topic1, 3, "3", "3", willBeCommitted = true))
+    producer.commitTransaction()
+
+    // Producer IDs should be retained.
+    assertEquals(1, producerState.size)

Review Comment:
   For my understanding, this assertion verifies that the producer ID created 
by the first transaction is still there, right? As the last transaction 
produces to partitions 2 and 3, should we verify them as well?
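   For reference, a minimal sketch of what that extra check could look like 
   (`producerStateOnPartition` is a hypothetical helper, not part of this PR):
   ```scala
   // Same pattern as the existing producerState helper, parameterized by partition.
   def producerStateOnPartition(partition: Int): java.util.List[ProducerState] = {
     val tp = new TopicPartition(topic1, partition)
     admin.describeProducers(Collections.singletonList(tp))
       .partitionResult(tp).get().activeProducers()
   }

   // After the committed transaction, partitions 2 and 3 should also carry the producer ID.
   assertEquals(1, producerStateOnPartition(2).size)
   assertEquals(1, producerStateOnPartition(3).size)
   ```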



##########
core/src/test/scala/integration/kafka/api/TransactionsExpirationTest.scala:
##########
@@ -105,6 +122,59 @@ class TransactionsExpirationTest extends KafkaServerTestHarness {
     }
   }
 
+  @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumName)
+  @ValueSource(strings = Array("zk", "kraft"))
+  def testTransactionAfterProducerIdExpires(quorum: String): Unit = {
+    producer.initTransactions()
+
+    // Start and then abort a transaction to allow the producer ID to expire
+    producer.beginTransaction()
+    producer.send(TestUtils.producerRecordWithExpectedTransactionStatus(topic1, 0, "2", "2", willBeCommitted = false))
+    producer.flush()
+
+    // Ensure producer IDs are added.
+    val pState = producerState
+    assertEquals(1, pState.size)
+    val oldProducerId = pState.get(0).producerId
+
+    producer.abortTransaction()
+
+    // Wait for the producer ID to expire
+    TestUtils.waitUntilTrue(() => producerState.isEmpty, "Producer IDs for topic1 did not expire.")
+
+    // Create a new producer to check that we retain the producer ID in transactional state.
+    producer.close()
+    producer = TestUtils.createTransactionalProducer("transactionalProducer", brokers)
+    producer.initTransactions()
+
+    // Start a new transaction and attempt to send. This should work since only the producer ID was removed from its mapping in ProducerStateManager.
+    producer.beginTransaction()
+    producer.send(TestUtils.producerRecordWithExpectedTransactionStatus(topic1, 0, "4", "4", willBeCommitted = true))
+    producer.send(TestUtils.producerRecordWithExpectedTransactionStatus(topic1, 3, "3", "3", willBeCommitted = true))
+    producer.commitTransaction()
+
+    // Producer IDs should repopulate.
+    val pState2 = producerState
+    assertEquals(1, pState2.size)
+    val newProducerId = pState2.get(0).producerId
+
+    // Producer IDs should be the same
+    assertEquals(oldProducerId, newProducerId)
+
+    consumer.subscribe(List(topic1).asJava)
+
+    val records = consumeRecords(consumer, 2)
+    records.foreach { record =>
+      TestUtils.assertCommittedAndGetValue(record)
+    }
+  }
+
+  private def producerState: java.util.List[ProducerState] = {

Review Comment:
   nit: I would use a Scala collection here as well.



##########
core/src/test/scala/integration/kafka/api/ProducerIdExpirationTest.scala:
##########
@@ -0,0 +1,176 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package kafka.api
+
+import java.util.{Collections, Properties}
+
+import kafka.integration.KafkaServerTestHarness
+import kafka.server.KafkaConfig
+import kafka.utils.{TestInfoUtils, TestUtils}
+import kafka.utils.TestUtils.{consumeRecords, createAdminClient}
+import org.apache.kafka.clients.admin.{Admin, ProducerState}
+import org.apache.kafka.clients.consumer.KafkaConsumer
+import org.apache.kafka.clients.producer.{KafkaProducer, ProducerRecord}
+import org.apache.kafka.common.TopicPartition
+import org.apache.kafka.common.errors.{InvalidPidMappingException, TransactionalIdNotFoundException}
+import org.junit.jupiter.api.Assertions.assertEquals
+import org.junit.jupiter.api.{AfterEach, BeforeEach, TestInfo}
+import org.junit.jupiter.params.ParameterizedTest
+import org.junit.jupiter.params.provider.ValueSource
+
+import scala.jdk.CollectionConverters._
+import scala.collection.Seq
+
+// Test class that tests producer ID expiration
+class ProducerIdExpirationTest extends KafkaServerTestHarness {
+  val topic1 = "topic1"
+  val numPartitions = 4
+  val replicationFactor = 3
+  val tp0 = new TopicPartition(topic1, 0)
+
+  var producer: KafkaProducer[Array[Byte], Array[Byte]] = _
+  var consumer: KafkaConsumer[Array[Byte], Array[Byte]] = _
+  var admin: Admin = _
+
+  override def generateConfigs: Seq[KafkaConfig] = {
+    TestUtils.createBrokerConfigs(3, zkConnectOrNull).map(KafkaConfig.fromProps(_, serverProps()))
+  }
+
+  @BeforeEach
+  override def setUp(testInfo: TestInfo): Unit = {
+    super.setUp(testInfo)
+    consumer = TestUtils.createConsumer(bootstrapServers(),
+      enableAutoCommit = false,
+      readCommitted = true)
+    admin = createAdminClient(brokers, listenerName)
+
+    createTopic(topic1, numPartitions, 3)
+  }
+
+  @AfterEach
+  override def tearDown(): Unit = {
+    if (producer != null)
+      producer.close()
+    consumer.close()
+    admin.close()
+
+    super.tearDown()
+  }
+
+  @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumName)
+  @ValueSource(strings = Array("zk", "kraft"))
+  def testProducerIdExpirationWithNoTransactions(quorum: String): Unit = {
+    producer = TestUtils.createProducer(bootstrapServers(), enableIdempotence = true)
+
+    // Send records to populate producer state cache.
+    producer.send(new ProducerRecord(topic1, 0, null, "key".getBytes, "value".getBytes))
+    producer.flush()
+
+    // Ensure producer IDs are added.
+    ensureConsistentKRaftMetadata()
+    assertEquals(1, producerState.size)
+
+    // Wait for the producer ID to expire
+    TestUtils.waitUntilTrue(() => producerState.isEmpty, "Producer ID did not expire.")
+
+    // Send more records to send producer ID back to brokers.
+    producer.send(new ProducerRecord(topic1, 0, null, "key".getBytes, "value".getBytes))
+    producer.flush()
+
+    // Producer IDs should repopulate.
+    assertEquals(1, producerState.size)
+  }
+
+  @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumName)
+  @ValueSource(strings = Array("zk", "kraft"))
+  def testTransactionAfterTransactionIdExpiresButProducerIdRemains(quorum: String): Unit = {
+    producer = TestUtils.createTransactionalProducer("transactionalProducer", brokers)
+    producer.initTransactions()
+
+    // Start and then abort a transaction to allow the producer ID to expire
+    producer.beginTransaction()
+    producer.send(TestUtils.producerRecordWithExpectedTransactionStatus(topic1, 0, "2", "2", willBeCommitted = false))
+    producer.flush()
+
+    // Ensure producer IDs are added.
+    assertEquals(1, producerState.size)
+
+    producer.abortTransaction()
+
+    // Wait for the transactional ID to expire
+    Thread.sleep(3000)
+
+    // Confirm the transactional IDs expired
+    val txnDescribeResult = admin.describeTransactions(Collections.singletonList("transactionalProducer")).description("transactionalProducer")
+    org.apache.kafka.test.TestUtils.assertFutureThrows(txnDescribeResult, classOf[TransactionalIdNotFoundException])
+
+    // Producer IDs should be retained.
+    assertEquals(1, producerState.size)
+
+    // Start a new transaction and attempt to send, which will trigger an AddPartitionsToTxnRequest, which will fail due to the expired transactional ID
+    producer.beginTransaction()
+    val failedFuture = producer.send(TestUtils.producerRecordWithExpectedTransactionStatus(topic1, 3, "1", "1", willBeCommitted = false))
+    TestUtils.waitUntilTrue(() => failedFuture.isDone, "Producer future never completed.")
+
+    org.apache.kafka.test.TestUtils.assertFutureThrows(failedFuture, classOf[InvalidPidMappingException])
+    producer.abortTransaction()
+
+    producer.beginTransaction()
+    producer.send(TestUtils.producerRecordWithExpectedTransactionStatus(topic1, 2, "4", "4", willBeCommitted = true))
+    producer.send(TestUtils.producerRecordWithExpectedTransactionStatus(topic1, 3, "3", "3", willBeCommitted = true))
+    producer.commitTransaction()
+
+    // Producer IDs should be retained.
+    assertEquals(1, producerState.size)
+
+    // Check we can still consume the transaction
+    consumer.subscribe(List(topic1).asJava)
+
+    val records = consumeRecords(consumer, 2)
+    records.foreach { record =>
+      TestUtils.assertCommittedAndGetValue(record)
+    }
+  }
+
+  private def producerState: java.util.List[ProducerState] = {

Review Comment:
   nit: I would rather use a Scala collection as the return type.
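   A possible sketch, reusing the `scala.jdk.CollectionConverters` import that is 
   already in the file (the exact return type and conversion are just a suggestion):
   ```scala
   private def producerState: Seq[ProducerState] = {
     val describeResult = admin.describeProducers(Collections.singletonList(tp0))
     describeResult.partitionResult(tp0).get.activeProducers.asScala.toSeq
   }
   ```
   The `size`/`isEmpty` call sites in this class keep working unchanged.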



##########
core/src/test/scala/integration/kafka/api/ProducerIdExpirationTest.scala:
##########
@@ -0,0 +1,176 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package kafka.api
+
+import java.util.{Collections, Properties}
+
+import kafka.integration.KafkaServerTestHarness
+import kafka.server.KafkaConfig
+import kafka.utils.{TestInfoUtils, TestUtils}
+import kafka.utils.TestUtils.{consumeRecords, createAdminClient}
+import org.apache.kafka.clients.admin.{Admin, ProducerState}
+import org.apache.kafka.clients.consumer.KafkaConsumer
+import org.apache.kafka.clients.producer.{KafkaProducer, ProducerRecord}
+import org.apache.kafka.common.TopicPartition
+import org.apache.kafka.common.errors.{InvalidPidMappingException, TransactionalIdNotFoundException}
+import org.junit.jupiter.api.Assertions.assertEquals
+import org.junit.jupiter.api.{AfterEach, BeforeEach, TestInfo}
+import org.junit.jupiter.params.ParameterizedTest
+import org.junit.jupiter.params.provider.ValueSource
+
+import scala.jdk.CollectionConverters._
+import scala.collection.Seq
+
+// Test class that tests producer ID expiration
+class ProducerIdExpirationTest extends KafkaServerTestHarness {
+  val topic1 = "topic1"
+  val numPartitions = 4
+  val replicationFactor = 3
+  val tp0 = new TopicPartition(topic1, 0)
+
+  var producer: KafkaProducer[Array[Byte], Array[Byte]] = _
+  var consumer: KafkaConsumer[Array[Byte], Array[Byte]] = _
+  var admin: Admin = _
+
+  override def generateConfigs: Seq[KafkaConfig] = {
+    TestUtils.createBrokerConfigs(3, zkConnectOrNull).map(KafkaConfig.fromProps(_, serverProps()))
+  }
+
+  @BeforeEach
+  override def setUp(testInfo: TestInfo): Unit = {
+    super.setUp(testInfo)
+    consumer = TestUtils.createConsumer(bootstrapServers(),
+      enableAutoCommit = false,
+      readCommitted = true)
+    admin = createAdminClient(brokers, listenerName)
+
+    createTopic(topic1, numPartitions, 3)
+  }
+
+  @AfterEach
+  override def tearDown(): Unit = {
+    if (producer != null)
+      producer.close()
+    consumer.close()
+    admin.close()
+
+    super.tearDown()
+  }
+
+  @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumName)
+  @ValueSource(strings = Array("zk", "kraft"))
+  def testProducerIdExpirationWithNoTransactions(quorum: String): Unit = {
+    producer = TestUtils.createProducer(bootstrapServers(), enableIdempotence = true)
+
+    // Send records to populate producer state cache.
+    producer.send(new ProducerRecord(topic1, 0, null, "key".getBytes, "value".getBytes))
+    producer.flush()
+
+    // Ensure producer IDs are added.
+    ensureConsistentKRaftMetadata()
+    assertEquals(1, producerState.size)
+
+    // Wait for the producer ID to expire
+    TestUtils.waitUntilTrue(() => producerState.isEmpty, "Producer ID did not expire.")
+
+    // Send more records to send producer ID back to brokers.
+    producer.send(new ProducerRecord(topic1, 0, null, "key".getBytes, "value".getBytes))
+    producer.flush()
+
+    // Producer IDs should repopulate.
+    assertEquals(1, producerState.size)
+  }
+
+  @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumName)
+  @ValueSource(strings = Array("zk", "kraft"))
+  def testTransactionAfterTransactionIdExpiresButProducerIdRemains(quorum: String): Unit = {
+    producer = TestUtils.createTransactionalProducer("transactionalProducer", brokers)
+    producer.initTransactions()
+
+    // Start and then abort a transaction to allow the producer ID to expire
+    producer.beginTransaction()
+    producer.send(TestUtils.producerRecordWithExpectedTransactionStatus(topic1, 0, "2", "2", willBeCommitted = false))
+    producer.flush()
+
+    // Ensure producer IDs are added.
+    assertEquals(1, producerState.size)
+
+    producer.abortTransaction()
+
+    // Wait for the transactional ID to expire
+    Thread.sleep(3000)
+
+    // Confirm the transactional IDs expired
+    val txnDescribeResult = admin.describeTransactions(Collections.singletonList("transactionalProducer")).description("transactionalProducer")
+    org.apache.kafka.test.TestUtils.assertFutureThrows(txnDescribeResult, classOf[TransactionalIdNotFoundException])
+
+    // Producer IDs should be retained.
+    assertEquals(1, producerState.size)
+
+    // Start a new transaction and attempt to send, which will trigger an AddPartitionsToTxnRequest, which will fail due to the expired transactional ID
+    producer.beginTransaction()
+    val failedFuture = producer.send(TestUtils.producerRecordWithExpectedTransactionStatus(topic1, 3, "1", "1", willBeCommitted = false))
+    TestUtils.waitUntilTrue(() => failedFuture.isDone, "Producer future never completed.")
+
+    org.apache.kafka.test.TestUtils.assertFutureThrows(failedFuture, classOf[InvalidPidMappingException])
+    producer.abortTransaction()
+
+    producer.beginTransaction()
+    producer.send(TestUtils.producerRecordWithExpectedTransactionStatus(topic1, 2, "4", "4", willBeCommitted = true))
+    producer.send(TestUtils.producerRecordWithExpectedTransactionStatus(topic1, 3, "3", "3", willBeCommitted = true))
+    producer.commitTransaction()
+
+    // Producer IDs should be retained.
+    assertEquals(1, producerState.size)
+
+    // Check we can still consume the transaction
+    consumer.subscribe(List(topic1).asJava)
+
+    val records = consumeRecords(consumer, 2)
+    records.foreach { record =>
+      TestUtils.assertCommittedAndGetValue(record)
+    }
+  }
+
+  private def producerState: java.util.List[ProducerState] = {
+    val describeResult = admin.describeProducers(Collections.singletonList(tp0))
+    val activeProducers = describeResult.partitionResult(tp0).get().activeProducers()

Review Comment:
   nit: `()` can be omitted after `activeProducers` because it is a getter.



##########
core/src/test/scala/integration/kafka/api/ProducerIdExpirationTest.scala:
##########
@@ -0,0 +1,176 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package kafka.api
+
+import java.util.{Collections, Properties}
+
+import kafka.integration.KafkaServerTestHarness
+import kafka.server.KafkaConfig
+import kafka.utils.{TestInfoUtils, TestUtils}
+import kafka.utils.TestUtils.{consumeRecords, createAdminClient}
+import org.apache.kafka.clients.admin.{Admin, ProducerState}
+import org.apache.kafka.clients.consumer.KafkaConsumer
+import org.apache.kafka.clients.producer.{KafkaProducer, ProducerRecord}
+import org.apache.kafka.common.TopicPartition
+import org.apache.kafka.common.errors.{InvalidPidMappingException, TransactionalIdNotFoundException}
+import org.junit.jupiter.api.Assertions.assertEquals
+import org.junit.jupiter.api.{AfterEach, BeforeEach, TestInfo}
+import org.junit.jupiter.params.ParameterizedTest
+import org.junit.jupiter.params.provider.ValueSource
+
+import scala.jdk.CollectionConverters._
+import scala.collection.Seq
+
+// Test class that tests producer ID expiration
+class ProducerIdExpirationTest extends KafkaServerTestHarness {
+  val topic1 = "topic1"
+  val numPartitions = 4
+  val replicationFactor = 3
+  val tp0 = new TopicPartition(topic1, 0)
+
+  var producer: KafkaProducer[Array[Byte], Array[Byte]] = _
+  var consumer: KafkaConsumer[Array[Byte], Array[Byte]] = _
+  var admin: Admin = _
+
+  override def generateConfigs: Seq[KafkaConfig] = {
+    TestUtils.createBrokerConfigs(3, zkConnectOrNull).map(KafkaConfig.fromProps(_, serverProps()))
+  }
+
+  @BeforeEach
+  override def setUp(testInfo: TestInfo): Unit = {
+    super.setUp(testInfo)
+    consumer = TestUtils.createConsumer(bootstrapServers(),
+      enableAutoCommit = false,
+      readCommitted = true)
+    admin = createAdminClient(brokers, listenerName)
+
+    createTopic(topic1, numPartitions, 3)
+  }
+
+  @AfterEach
+  override def tearDown(): Unit = {
+    if (producer != null)
+      producer.close()
+    consumer.close()
+    admin.close()

Review Comment:
   nit: It may be worth null checking those two before closing them as well.
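   For example, just a sketch of the null checks, mirroring how the producer is handled:
   ```scala
   @AfterEach
   override def tearDown(): Unit = {
     if (producer != null)
       producer.close()
     if (consumer != null)
       consumer.close()
     if (admin != null)
       admin.close()

     super.tearDown()
   }
   ```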



##########
core/src/test/scala/integration/kafka/api/TransactionsExpirationTest.scala:
##########
@@ -79,13 +86,20 @@ class TransactionsExpirationTest extends KafkaServerTestHarness {
     producer.send(TestUtils.producerRecordWithExpectedTransactionStatus(topic2, 0, "4", "4", willBeCommitted = false))
     producer.abortTransaction()
 
-    // Wait for the transactional ID to expire
+    // Ensure the transaction state exists
+    val describeState = admin.describeTransactions(Collections.singletonList("transactionalProducer")).description("transactionalProducer")
+    TestUtils.waitUntilTrue(() => describeState.isDone, "Transactional state was never added.")
+
     Thread.sleep(3000)
 
-    // Start a new transaction and attempt to send, which will trigger an AddPartitionsToTxnRequest, which will fail due to the expired producer ID
+    // Wait for the transactional state to expire
+    val describeResult = admin.describeTransactions(Collections.singletonList("transactionalProducer")).description("transactionalProducer")

Review Comment:
   Same comment as before: we may be able to remove the sleep here as well.



##########
core/src/test/scala/integration/kafka/api/ProducerIdExpirationTest.scala:
##########
@@ -0,0 +1,176 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package kafka.api
+
+import java.util.{Collections, Properties}
+
+import kafka.integration.KafkaServerTestHarness
+import kafka.server.KafkaConfig
+import kafka.utils.{TestInfoUtils, TestUtils}
+import kafka.utils.TestUtils.{consumeRecords, createAdminClient}
+import org.apache.kafka.clients.admin.{Admin, ProducerState}
+import org.apache.kafka.clients.consumer.KafkaConsumer
+import org.apache.kafka.clients.producer.{KafkaProducer, ProducerRecord}
+import org.apache.kafka.common.TopicPartition
+import org.apache.kafka.common.errors.{InvalidPidMappingException, TransactionalIdNotFoundException}
+import org.junit.jupiter.api.Assertions.assertEquals
+import org.junit.jupiter.api.{AfterEach, BeforeEach, TestInfo}
+import org.junit.jupiter.params.ParameterizedTest
+import org.junit.jupiter.params.provider.ValueSource
+
+import scala.jdk.CollectionConverters._
+import scala.collection.Seq
+
+// Test class that tests producer ID expiration
+class ProducerIdExpirationTest extends KafkaServerTestHarness {
+  val topic1 = "topic1"
+  val numPartitions = 4
+  val replicationFactor = 3
+  val tp0 = new TopicPartition(topic1, 0)
+
+  var producer: KafkaProducer[Array[Byte], Array[Byte]] = _
+  var consumer: KafkaConsumer[Array[Byte], Array[Byte]] = _
+  var admin: Admin = _
+
+  override def generateConfigs: Seq[KafkaConfig] = {
+    TestUtils.createBrokerConfigs(3, zkConnectOrNull).map(KafkaConfig.fromProps(_, serverProps()))
+  }
+
+  @BeforeEach
+  override def setUp(testInfo: TestInfo): Unit = {
+    super.setUp(testInfo)
+    consumer = TestUtils.createConsumer(bootstrapServers(),
+      enableAutoCommit = false,
+      readCommitted = true)
+    admin = createAdminClient(brokers, listenerName)
+
+    createTopic(topic1, numPartitions, 3)
+  }
+
+  @AfterEach
+  override def tearDown(): Unit = {
+    if (producer != null)
+      producer.close()
+    consumer.close()
+    admin.close()
+
+    super.tearDown()
+  }
+
+  @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumName)
+  @ValueSource(strings = Array("zk", "kraft"))
+  def testProducerIdExpirationWithNoTransactions(quorum: String): Unit = {
+    producer = TestUtils.createProducer(bootstrapServers(), enableIdempotence = true)
+
+    // Send records to populate producer state cache.
+    producer.send(new ProducerRecord(topic1, 0, null, "key".getBytes, "value".getBytes))
+    producer.flush()
+
+    // Ensure producer IDs are added.
+    ensureConsistentKRaftMetadata()
+    assertEquals(1, producerState.size)
+
+    // Wait for the producer ID to expire
+    TestUtils.waitUntilTrue(() => producerState.isEmpty, "Producer ID did not expire.")
+
+    // Send more records to send producer ID back to brokers.
+    producer.send(new ProducerRecord(topic1, 0, null, "key".getBytes, "value".getBytes))
+    producer.flush()
+
+    // Producer IDs should repopulate.
+    assertEquals(1, producerState.size)
+  }
+
+  @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumName)
+  @ValueSource(strings = Array("zk", "kraft"))
+  def testTransactionAfterTransactionIdExpiresButProducerIdRemains(quorum: String): Unit = {
+    producer = TestUtils.createTransactionalProducer("transactionalProducer", brokers)
+    producer.initTransactions()
+
+    // Start and then abort a transaction to allow the producer ID to expire
+    producer.beginTransaction()
+    producer.send(TestUtils.producerRecordWithExpectedTransactionStatus(topic1, 0, "2", "2", willBeCommitted = false))
+    producer.flush()
+
+    // Ensure producer IDs are added.
+    assertEquals(1, producerState.size)
+
+    producer.abortTransaction()
+
+    // Wait for the transactional ID to expire
+    Thread.sleep(3000)
+
+    // Confirm the transactional IDs expired
+    val txnDescribeResult = admin.describeTransactions(Collections.singletonList("transactionalProducer")).description("transactionalProducer")
+    org.apache.kafka.test.TestUtils.assertFutureThrows(txnDescribeResult, classOf[TransactionalIdNotFoundException])

Review Comment:
   nit: You could import it as follows in order to avoid this: `import 
org.apache.kafka.test.{TestUtils => JTestUtils}`.
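   With that alias in place, the call site would then read, for example:
   ```scala
   JTestUtils.assertFutureThrows(txnDescribeResult, classOf[TransactionalIdNotFoundException])
   ```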



##########
core/src/test/scala/integration/kafka/api/TransactionsExpirationTest.scala:
##########
@@ -97,6 +111,9 @@ class TransactionsExpirationTest extends KafkaServerTestHarness {
     producer.send(TestUtils.producerRecordWithExpectedTransactionStatus(topic1, 3, "3", "3", willBeCommitted = true))
     producer.commitTransaction()
 
+    val describeState2 = admin.describeTransactions(Collections.singletonList("transactionalProducer")).description("transactionalProducer")
+    TestUtils.waitUntilTrue(() => describeState2.isDone, "Transactional state was never added.")

Review Comment:
   nit: Would it make sense to factor out those two lines into a helper method 
to avoid the repetition?
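   A rough sketch of such a helper (the name and message are placeholders):
   ```scala
   private def waitUntilTransactionalStateExists(transactionalId: String): Unit = {
     val describeState = admin.describeTransactions(Collections.singletonList(transactionalId))
       .description(transactionalId)
     TestUtils.waitUntilTrue(() => describeState.isDone, s"Transactional state for $transactionalId was never added.")
   }
   ```
   Both spots could then just call `waitUntilTransactionalStateExists("transactionalProducer")`.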



##########
core/src/main/scala/kafka/server/KafkaConfig.scala:
##########
@@ -1295,7 +1298,8 @@ object KafkaConfig {
       .define(TransactionsAbortTimedOutTransactionCleanupIntervalMsProp, INT, Defaults.TransactionsAbortTimedOutTransactionsCleanupIntervalMS, atLeast(1), LOW, TransactionsAbortTimedOutTransactionsIntervalMsDoc)
       .define(TransactionsRemoveExpiredTransactionalIdCleanupIntervalMsProp, INT, Defaults.TransactionsRemoveExpiredTransactionsCleanupIntervalMS, atLeast(1), LOW, TransactionsRemoveExpiredTransactionsIntervalMsDoc)
 
-      .define(ProducerIdExpirationMsProp, INT, Defaults.ProducerIdExpirationMs, atLeast(1), HIGH, ProducerIdExpirationMsDoc)
+      .define(ProducerIdExpirationMsProp, INT, Defaults.ProducerIdExpirationMs, atLeast(1), LOW, ProducerIdExpirationMsDoc)
+      .defineInternal(ProducerIdExpirationCheckIntervalMsProp, INT, Defaults.ProducerIdExpirationCheckIntervalMs, atLeast(1), LOW, ProducerIdExpirationMsDoc)

Review Comment:
   nit: If we keep it internal, could we add a small comment about it?
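   For example, something along these lines (the wording is only a suggestion):
   ```scala
   // Internal config controlling how often the broker checks for producer IDs that can be expired;
   // kept internal rather than exposed as a public broker config.
   .defineInternal(ProducerIdExpirationCheckIntervalMsProp, INT, Defaults.ProducerIdExpirationCheckIntervalMs, atLeast(1), LOW, ProducerIdExpirationMsDoc)
   ```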



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: jira-unsubscr...@kafka.apache.org

For queries about this service, please contact Infrastructure at:
us...@infra.apache.org
