adixitconfluent commented on code in PR #20826:
URL: https://github.com/apache/kafka/pull/20826#discussion_r2493597151
##########
core/src/test/scala/unit/kafka/server/KafkaApisTest.scala:
##########
@@ -6582,6 +6583,131 @@ class KafkaApisTest extends Logging {
assertArrayEquals(expectedAcquiredRecords(10, 19, 1).toArray(),
topicResponse.partitions.get(0).acquiredRecords.toArray())
}
+ @Test
+ def testHandleShareFetchRequestSuccessWithRenewAcknowledgements(): Unit = {
Review Comment:
I think we should enhance this test case by using both RENEW acks and the
previously existing acknowledgement types, just to make sure they work correctly together.
##########
core/src/test/scala/unit/kafka/server/KafkaApisTest.scala:
##########
@@ -6582,6 +6583,131 @@ class KafkaApisTest extends Logging {
assertArrayEquals(expectedAcquiredRecords(10, 19, 1).toArray(),
topicResponse.partitions.get(0).acquiredRecords.toArray())
}
+ @Test
+ def testHandleShareFetchRequestSuccessWithRenewAcknowledgements(): Unit = {
+ val topicName = "foo"
+ val topicId = Uuid.randomUuid()
+ val partitionIndex = 0
+ metadataCache = initializeMetadataCacheWithShareGroupsEnabled()
+ addTopicToMetadataCache(topicName, 1, topicId = topicId)
+ val memberId: Uuid = Uuid.randomUuid()
+
+ val records1 = memoryRecords(10, 0)
+
+ val groupId = "group"
+
+ when(sharePartitionManager.fetchMessages(any(), any(), any(), anyInt(),
anyInt(), anyInt(), any())).thenReturn(
+ CompletableFuture.completedFuture(util.Map.of[TopicIdPartition,
ShareFetchResponseData.PartitionData](
+ new TopicIdPartition(topicId, new TopicPartition(topicName,
partitionIndex)),
+ new ShareFetchResponseData.PartitionData()
+ .setErrorCode(Errors.NONE.code)
+ .setAcknowledgeErrorCode(Errors.NONE.code)
+ .setRecords(records1)
+ .setAcquiredRecords(new util.ArrayList(util.List.of(
+ new ShareFetchResponseData.AcquiredRecords()
+ .setFirstOffset(0)
+ .setLastOffset(9)
+ .setDeliveryCount(1)
+ )))
+ ))
+ )
+
+ val cachedSharePartitions = new
ImplicitLinkedHashCollection[CachedSharePartition]
+ cachedSharePartitions.mustAdd(new CachedSharePartition(
+ new TopicIdPartition(topicId, 0, topicName), false
+ ))
+
+ when(sharePartitionManager.newContext(any(), any(), any(), any(), any(),
any())).thenReturn(
+ new ShareSessionContext(new ShareRequestMetadata(memberId, 0),
util.List.of(
+ new TopicIdPartition(topicId, partitionIndex, topicName)
+ ))
+ ).thenReturn(new ShareSessionContext(new ShareRequestMetadata(memberId,
1), new ShareSession(
+ new ShareSessionKey(groupId, memberId), cachedSharePartitions, 2))
+ )
+
+ when(clientQuotaManager.maybeRecordAndGetThrottleTimeMs(
Review Comment:
We can get rid of this mock. We don't need it.
##########
core/src/test/scala/unit/kafka/server/KafkaApisTest.scala:
##########
@@ -13765,6 +13899,73 @@ class KafkaApisTest extends Logging {
assertEquals(alterShareGroupOffsetsResponseData, response.data)
}
+ @ParameterizedTest
+ @CsvSource(value = Array("1,true,true", "1,false,true", "2,true,false",
"2,false,true"))
+ def testValidateAcknowledgementBatchesForRenew(version: Short, isRenew:
Boolean, shouldFail: Boolean): Unit = {
+ kafkaApis = createKafkaApis()
+ val tp = new TopicIdPartition(Uuid.randomUuid(), new
TopicPartition("topic", 0))
+ val ackMap = mutable.Map(tp -> util.List.of(new
ShareAcknowledgementBatch(0, 0, util.List.of(AcknowledgeType.RENEW.id))))
+ val erroneous:mutable.Map[TopicIdPartition,
ShareAcknowledgeResponseData.PartitionData] = mutable.Map()
+ val errorSet = kafkaApis.validateAcknowledgementBatches(ackMap, erroneous,
version, isRenewAck = isRenew)
+ if (shouldFail) {
+ assertEquals(1, errorSet.size, s"expected error topic partition,
version=${version}, isRenew=${isRenew}")
+ assertTrue(errorSet.contains(tp), s"error topic partition mismatch,
version=${version}, isRenew=${isRenew}")
+ } else {
+ assertEquals(0, errorSet.size, s"unexpected error topic partition,
version=${version}, isRenew=${isRenew}")
+ }
+ }
+
+ @Test
+ def testHandleShareFetchRenewInvalidRequest(): Unit = {
+ val topicId = Uuid.randomUuid()
+ val partitionIndex = 0
+ val groupId = "group"
+ val memberId = Uuid.randomUuid()
+ val testPrincipal = new KafkaPrincipal(KafkaPrincipal.USER_TYPE,
"test-user")
+ val testClientAddress = InetAddress.getByName("192.168.1.100")
+ val testClientId = "test-client-id"
+ metadataCache = initializeMetadataCacheWithShareGroupsEnabled()
+
+ when(sharePartitionManager.newContext(any(), any(), any(), any(), any(),
any())).thenReturn(
+ new FinalContext()
+ )
+
+ val shareFetchRequestData = new ShareFetchRequestData()
+ .setGroupId(groupId)
+ .setMemberId(memberId.toString)
+ .setShareSessionEpoch(0)
+ .setIsRenewAck(true)
+ .setMinBytes(10)
+ .setMaxBytes(20)
+ .setMaxRecords(30)
+ .setMaxWaitMs(40)
+ .setTopics(new
ShareFetchRequestData.FetchTopicCollection(util.List.of(new
ShareFetchRequestData.FetchTopic()
+ .setTopicId(topicId)
+ .setPartitions(new
ShareFetchRequestData.FetchPartitionCollection(util.List.of(
+ new ShareFetchRequestData.FetchPartition()
+ .setAcknowledgementBatches(util.List.of(new AcknowledgementBatch()
+ .setFirstOffset(0)
+ .setLastOffset(0)
+ .setAcknowledgeTypes(util.List.of(AcknowledgeType.RENEW.id))))
+ .setPartitionIndex(partitionIndex)
+ ).iterator))
+ ).iterator))
+
+ val shareFetchRequest = new
ShareFetchRequest.Builder(shareFetchRequestData).build(ApiKeys.SHARE_FETCH.latestVersion)
+
+ // Create request with custom principal and client address to test quota
tags
+ val requestHeader = new RequestHeader(shareFetchRequest.apiKey,
shareFetchRequest.version, testClientId, 0)
+ val request = buildRequest(shareFetchRequest, testPrincipal,
testClientAddress,
+ ListenerName.forSecurityProtocol(SecurityProtocol.SSL),
fromPrivilegedListener = false, Some(requestHeader), requestChannelMetrics)
+
+ val kafkaApis = createKafkaApis()
+ kafkaApis.handleShareFetchRequest(request)
+ val response = verifyNoThrottling[ShareFetchResponse](request)
+ val responseData = response.data()
+
+ assertEquals(Errors.INVALID_REQUEST.code, responseData.errorCode)
Review Comment:
In this case we should also add assertions on the error message, since you
have custom messages for the different failure conditions.
--
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
To unsubscribe, e-mail: [email protected]
For queries about this service, please contact Infrastructure at:
[email protected]