frankvicky commented on code in PR #16227:
URL: https://github.com/apache/kafka/pull/16227#discussion_r1631832337


##########
clients/src/test/java/org/apache/kafka/clients/consumer/ConsumerRecordsTest.java:
##########
@@ -59,4 +49,72 @@ public void iterator() throws Exception {
         }
         assertEquals(2, c);
     }
+
+    @Test
+    public void testRecordsWithNullTopic() {
+        String nullTopic = null;
+        Map<TopicPartition, List<ConsumerRecord<Integer, String>>> records = 
new LinkedHashMap<>();
+        ConsumerRecords<Integer, String> consumerRecords = new 
ConsumerRecords<>(records);
+        IllegalArgumentException exception = 
assertThrows(IllegalArgumentException.class, () -> 
consumerRecords.records(nullTopic));
+        assertEquals("Topic must be non-null.", exception.getMessage());
+    }
+
+    @Test
+    public void testRecords() {
+        String[] topics = {"topic1", "topic2", "topic3", "topic4"};
+        int recordSize = 3;
+        Map<TopicPartition, List<ConsumerRecord<Integer, String>>> 
partitionToRecords = buildTopicTestRecords(recordSize, topics);
+        ConsumerRecords<Integer, String> consumerRecords = new 
ConsumerRecords<>(partitionToRecords);
+        for (String topic : topics) {
+            Iterable<ConsumerRecord<Integer, String>> records = 
consumerRecords.records(topic);
+
+            int count  = 0;
+            Iterator<ConsumerRecord<Integer, String>> iterator = 
records.iterator();
+            for (; iterator.hasNext() && count < recordSize; count++) {
+                ConsumerRecord<Integer, String> record = iterator.next();
+                assertEquals(topic, record.topic());
+                assertEquals(count, record.partition());
+                assertEquals(count, record.offset());
+                assertEquals(count, record.key());
+                assertEquals(String.valueOf(count), record.value());
+            }
+        }
+    }
+
+    private Map<TopicPartition, List<ConsumerRecord<Integer, String>>> 
buildSingleTopicTestRecords(String topic) {
+        Map<TopicPartition, List<ConsumerRecord<Integer, String>>> records = 
new LinkedHashMap<>();
+        ConsumerRecord<Integer, String> record1 = new ConsumerRecord<>(topic, 
1, 0, 0L, TimestampType.CREATE_TIME,
+            0, 0, 1, "value1", new RecordHeaders(), Optional.empty());
+        ConsumerRecord<Integer, String> record2 = new ConsumerRecord<>(topic, 
1, 1, 0L, TimestampType.CREATE_TIME,
+            0, 0, 2, "value2", new RecordHeaders(), Optional.empty());
+
+        new ArrayList<>();
+        for (int i = 0; i < 3; i++) {
+            new ConsumerRecord<>(topic, i, i, 0L, TimestampType.CREATE_TIME,
+                0, 0, 2, String.valueOf(i), new RecordHeaders(), 
Optional.empty());
+        }
+
+        records.put(new TopicPartition(topic, 0), new ArrayList<>());
+        records.put(new TopicPartition(topic, 1), Arrays.asList(record1, 
record2));
+        records.put(new TopicPartition(topic, 2), new ArrayList<>());
+        return records;
+    }
+
+    private Map<TopicPartition, List<ConsumerRecord<Integer, String>>> 
buildTopicTestRecords(int recordSize, String... topics) {

Review Comment:
   Yes, it is clearer if we return `ConsumerRecords` directly.



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: jira-unsubscr...@kafka.apache.org

For queries about this service, please contact Infrastructure at:
us...@infra.apache.org

Reply via email to