divijvaidya commented on code in PR #14049:
URL: https://github.com/apache/kafka/pull/14049#discussion_r1275269942


##########
storage/src/test/java/org/apache/kafka/server/log/remote/metadata/storage/TopicBasedRemoteLogMetadataManagerTest.java:
##########
@@ -168,4 +169,64 @@ private void waitUntilConsumerCatchesup(TopicIdPartition newLeaderTopicIdPartiti
         }
     }
 
+    @Test
+    public void testRemoteLogSizeCalculationForUnknownTopicIdPartitionThrows() {
+        TopicIdPartition topicIdPartition = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("singleton", 0));
+        Assertions.assertThrows(RemoteResourceNotFoundException.class, () -> topicBasedRlmm().remoteLogSize(topicIdPartition, 0));
+    }
+
+    @Test
+    public void testRemoteLogSizeCalculationWithSegmentsOfTheSameEpoch() throws RemoteStorageException, TimeoutException {
+        TopicIdPartition topicIdPartition = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("singleton", 0));
+        TopicBasedRemoteLogMetadataManager topicBasedRemoteLogMetadataManager = topicBasedRlmm();
+
+        RemoteLogSegmentMetadata firstSegmentMetadata = new RemoteLogSegmentMetadata(new RemoteLogSegmentId(topicIdPartition, Uuid.randomUuid()),
+                0, 100, -1L, 0, time.milliseconds(), SEG_SIZE, Collections.singletonMap(0, 0L));
+        RemoteLogSegmentMetadata secondSegmentMetadata = new RemoteLogSegmentMetadata(new RemoteLogSegmentId(topicIdPartition, Uuid.randomUuid()),
+                100, 200, -1L, 0, time.milliseconds(), SEG_SIZE * 2, Collections.singletonMap(0, 0L));
+        RemoteLogSegmentMetadata thirdSegmentMetadata = new RemoteLogSegmentMetadata(new RemoteLogSegmentId(topicIdPartition, Uuid.randomUuid()),
+                200, 300, -1L, 0, time.milliseconds(), SEG_SIZE * 3, Collections.singletonMap(0, 0L));
+
+        topicBasedRemoteLogMetadataManager.addRemoteLogSegmentMetadata(firstSegmentMetadata);
+        topicBasedRemoteLogMetadataManager.addRemoteLogSegmentMetadata(secondSegmentMetadata);
+        topicBasedRemoteLogMetadataManager.addRemoteLogSegmentMetadata(thirdSegmentMetadata);
+
+        topicBasedRemoteLogMetadataManager.onPartitionLeadershipChanges(Collections.singleton(topicIdPartition), Collections.emptySet());
+
+        // RemoteLogSegmentMetadata events are already published, and topicBasedRlmm's consumer manager will start
+        // fetching those events and build the cache.
+        waitUntilConsumerCatchesup(topicIdPartition, topicIdPartition, 30_000L);
+
+        Long remoteLogSize = topicBasedRemoteLogMetadataManager.remoteLogSize(topicIdPartition, 0);
+
+        Assertions.assertEquals(SEG_SIZE * 6, remoteLogSize);
+    }
+
+    @Test
+    public void testRemoteLogSizeCalculationWithSegmentsOfDifferentEpochs() throws RemoteStorageException, TimeoutException {
+        TopicIdPartition topicIdPartition = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("singleton", 0));
+        TopicBasedRemoteLogMetadataManager topicBasedRemoteLogMetadataManager = topicBasedRlmm();
+
+        RemoteLogSegmentMetadata firstSegmentMetadata = new RemoteLogSegmentMetadata(new RemoteLogSegmentId(topicIdPartition, Uuid.randomUuid()),
+                0, 100, -1L, 0, time.milliseconds(), SEG_SIZE, Collections.singletonMap(0, 0L));
+        RemoteLogSegmentMetadata secondSegmentMetadata = new RemoteLogSegmentMetadata(new RemoteLogSegmentId(topicIdPartition, Uuid.randomUuid()),
+                100, 200, -1L, 0, time.milliseconds(), SEG_SIZE * 2, Collections.singletonMap(1, 100L));
+        RemoteLogSegmentMetadata thirdSegmentMetadata = new RemoteLogSegmentMetadata(new RemoteLogSegmentId(topicIdPartition, Uuid.randomUuid()),
+                200, 300, -1L, 0, time.milliseconds(), SEG_SIZE * 3, Collections.singletonMap(2, 200L));
+
+        topicBasedRemoteLogMetadataManager.addRemoteLogSegmentMetadata(firstSegmentMetadata);
+        topicBasedRemoteLogMetadataManager.addRemoteLogSegmentMetadata(secondSegmentMetadata);
+        topicBasedRemoteLogMetadataManager.addRemoteLogSegmentMetadata(thirdSegmentMetadata);
+
+        topicBasedRemoteLogMetadataManager.onPartitionLeadershipChanges(Collections.singleton(topicIdPartition), Collections.emptySet());
+
+        // RemoteLogSegmentMetadata events are already published, and topicBasedRlmm's consumer manager will start
+        // fetching those events and build the cache.
+        waitUntilConsumerCatchesup(topicIdPartition, topicIdPartition, 30_000L);
+
+        Assertions.assertEquals(SEG_SIZE, topicBasedRemoteLogMetadataManager.remoteLogSize(topicIdPartition, 0));
+        Assertions.assertEquals(SEG_SIZE * 2, topicBasedRemoteLogMetadataManager.remoteLogSize(topicIdPartition, 1));
+        Assertions.assertEquals(SEG_SIZE * 3, topicBasedRemoteLogMetadataManager.remoteLogSize(topicIdPartition, 2));

Review Comment:
   I think what @showuon is saying is to add a test where the second argument to `topicBasedRemoteLogMetadataManager.remoteLogSize(topicIdPartition, 0)` is, say, -1 or 10, where 10 is not in the leaderEpochCache and is an epoch this partition has never seen. I think the behaviour should be to return 0 (since we don't have any remote log segments with that epoch, the size is 0).
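   A rough sketch of what that extra case could look like, reusing the setup pattern from the tests above. The test name and the expected-zero behaviour for an unseen epoch are my assumptions about the intended contract, not something the current implementation is confirmed to guarantee:

   ```java
   // Sketch only: epochs 10 and -1 were never written for this partition,
   // so the assumed expectation is a remote log size of 0.
   @Test
   public void testRemoteLogSizeCalculationForUnknownEpochReturnsZero() throws RemoteStorageException, TimeoutException {
       TopicIdPartition topicIdPartition = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("singleton", 0));
       TopicBasedRemoteLogMetadataManager topicBasedRemoteLogMetadataManager = topicBasedRlmm();

       // Publish a single segment that only carries leader epoch 0.
       RemoteLogSegmentMetadata segmentMetadata = new RemoteLogSegmentMetadata(new RemoteLogSegmentId(topicIdPartition, Uuid.randomUuid()),
               0, 100, -1L, 0, time.milliseconds(), SEG_SIZE, Collections.singletonMap(0, 0L));
       topicBasedRemoteLogMetadataManager.addRemoteLogSegmentMetadata(segmentMetadata);

       topicBasedRemoteLogMetadataManager.onPartitionLeadershipChanges(Collections.singleton(topicIdPartition), Collections.emptySet());
       waitUntilConsumerCatchesup(topicIdPartition, topicIdPartition, 30_000L);

       // Assumption: querying an epoch this partition has never seen yields 0 rather than an exception.
       Assertions.assertEquals(0L, topicBasedRemoteLogMetadataManager.remoteLogSize(topicIdPartition, 10));
       Assertions.assertEquals(0L, topicBasedRemoteLogMetadataManager.remoteLogSize(topicIdPartition, -1));
   }
   ```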


