rajinisivaram commented on code in PR #13303:
URL: https://github.com/apache/kafka/pull/13303#discussion_r1128444212


##########
clients/src/main/java/org/apache/kafka/clients/consumer/internals/PrototypeAsyncConsumer.java:
##########
@@ -226,11 +246,20 @@ public void commitAsync(OffsetCommitCallback callback) {
 
     @Override
     public void commitAsync(Map<TopicPartition, OffsetAndMetadata> offsets, OffsetCommitCallback callback) {
-        final CommitApplicationEvent commitEvent = new CommitApplicationEvent(offsets);
-        commitEvent.future().whenComplete((r, t) -> {
-            callback.onComplete(offsets, new RuntimeException(t));
+        CompletableFuture<Void> future = commit(offsets);
+        future.whenComplete((r, t) -> {
+            if (t != null) {
+                callback.onComplete(offsets, new RuntimeException(t));
+            } else {
+                callback.onComplete(offsets, null);
+            }
         });
+    }
+
+    private CompletableFuture<Void> commit(Map<TopicPartition, OffsetAndMetadata> offsets) {

Review Comment:
   I thought we had those methods on the existing Consumer interface. Either way, that doesn't impact this PR.
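   
   For reference, caller-side usage of this overload could look roughly like the sketch below (not taken from the PR; it assumes `consumer` is an already-constructed `PrototypeAsyncConsumer<String, String>` and `log` is an ordinary slf4j logger):
   
   ```java
   Map<TopicPartition, OffsetAndMetadata> offsets = new HashMap<>();
   offsets.put(new TopicPartition("t1", 0), new OffsetAndMetadata(42L));
   
   consumer.commitAsync(offsets, (committed, exception) -> {
       if (exception != null) {
           // the failure from commit(offsets) arrives wrapped in a RuntimeException
           log.error("Async commit failed for {}", committed, exception);
       } else {
           log.info("Async commit succeeded for {}", committed);
       }
   });
   ```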



##########
clients/src/test/java/org/apache/kafka/clients/consumer/internals/PrototypeAsyncConsumerTest.java:
##########
@@ -16,90 +16,77 @@
  */
 package org.apache.kafka.clients.consumer.internals;
 
+import org.apache.kafka.clients.consumer.Consumer;
 import org.apache.kafka.clients.consumer.ConsumerConfig;
-import org.apache.kafka.clients.consumer.OffsetResetStrategy;
-import org.apache.kafka.clients.consumer.internals.events.EventHandler;
 import org.apache.kafka.common.KafkaException;
 import org.apache.kafka.common.internals.ClusterResourceListeners;
-import org.apache.kafka.common.metrics.Metrics;
+import org.apache.kafka.common.serialization.Deserializer;
 import org.apache.kafka.common.serialization.StringDeserializer;
 import org.apache.kafka.common.utils.LogContext;
 import org.apache.kafka.common.utils.MockTime;
+import org.apache.kafka.common.utils.Time;
+import org.junit.jupiter.api.AfterEach;
 import org.junit.jupiter.api.BeforeEach;
 import org.junit.jupiter.api.Test;
-import org.mockito.Mockito;
 
+import java.time.Duration;
 import java.util.HashMap;
 import java.util.Map;
-import java.util.Optional;
 
-import static java.util.Collections.singleton;
-import static org.apache.kafka.clients.consumer.ConsumerConfig.CLIENT_ID_CONFIG;
+import static org.apache.kafka.clients.consumer.ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG;
 import static org.apache.kafka.clients.consumer.ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG;
 import static org.apache.kafka.clients.consumer.ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG;
-import static org.junit.jupiter.api.Assertions.assertEquals;
 import static org.junit.jupiter.api.Assertions.assertThrows;
 
 public class PrototypeAsyncConsumerTest {
-    private Map<String, Object> properties;
-    private SubscriptionState subscriptionState;
-    private MockTime time;
-    private LogContext logContext;
-    private Metrics metrics;
-    private ClusterResourceListeners clusterResourceListeners;
-    private Optional<String> groupId;
-    private String clientId;
-    private EventHandler eventHandler;
+
+    private Consumer<?, ?> consumer;
+    private Map<String, Object> consumerProps = new HashMap<>();
+
+    private final Time time = new MockTime();
 
     @BeforeEach
     public void setup() {
-        this.subscriptionState = Mockito.mock(SubscriptionState.class);
-        this.eventHandler = Mockito.mock(DefaultEventHandler.class);
-        this.logContext = new LogContext();
-        this.time = new MockTime();
-        this.metrics = new Metrics(time);
-        this.groupId = Optional.empty();
-        this.clientId = "client-1";
-        this.clusterResourceListeners = new ClusterResourceListeners();
-        this.properties = new HashMap<>();
-        this.properties.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG,
-                "localhost" +
-                ":9999");
-        this.properties.put(KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
-        this.properties.put(VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
-        this.properties.put(CLIENT_ID_CONFIG, "test-client");
+        injectConsumerConfigs();
+    }
+
+    @AfterEach
+    public void cleanup() {
+        if (consumer != null) {
+            consumer.close(Duration.ZERO);
+        }
     }
+
     @Test
-    public void testSubscription() {
-        this.subscriptionState =
-                new SubscriptionState(new LogContext(), OffsetResetStrategy.EARLIEST);
-        PrototypeAsyncConsumer<String, String> consumer =
-                setupConsumerWithDefault();
-        subscriptionState.subscribe(singleton("t1"),
-                new NoOpConsumerRebalanceListener());
-        assertEquals(1, consumer.subscription().size());
+    public void testBackgroundThreadRunning() {
+        consumer = newConsumer(time, new StringDeserializer(), new StringDeserializer());
     }
 
     @Test
     public void testUnimplementedException() {
-        PrototypeAsyncConsumer<String, String> consumer =
-                setupConsumerWithDefault();
+        consumer = newConsumer(time, new StringDeserializer(), new StringDeserializer());
         assertThrows(KafkaException.class, consumer::assignment, "not implemented exception");
     }
 
-    public PrototypeAsyncConsumer<String, String> setupConsumerWithDefault() {
-        ConsumerConfig config = new ConsumerConfig(properties);
-        return new PrototypeAsyncConsumer<>(
-                this.time,
-                this.logContext,
-                config,
-                this.subscriptionState,
-                this.eventHandler,
-                this.metrics,
-                this.clusterResourceListeners,
-                this.groupId,
-                this.clientId,
-                0);
+    private ConsumerMetadata createMetadata(SubscriptionState subscription) {
+        return new ConsumerMetadata(0, Long.MAX_VALUE, false, false,
+                subscription, new LogContext(), new ClusterResourceListeners());
+    }
+
+    private void injectConsumerConfigs() {
+        consumerProps.put(BOOTSTRAP_SERVERS_CONFIG, "localhost:9999");

Review Comment:
   For the longer term, it would be good to use a MockClient rather than one that attempts to connect and fills the log with exceptions. For now, this seems reasonable.


