junrao commented on code in PR #14359:
URL: https://github.com/apache/kafka/pull/14359#discussion_r1326276789


##########
clients/src/test/java/org/apache/kafka/clients/consumer/internals/CompletedFetchTest.java:
##########
@@ -115,13 +122,17 @@ public void testCommittedTransactionRecordsIncluded() {
         Records rawRecords = newTranscactionalRecords(ControlRecordType.COMMIT, numRecords);
         FetchResponseData.PartitionData partitionData = new FetchResponseData.PartitionData()
                 .setRecords(rawRecords);
-        CompletedFetch<String, String> completedFetch = newCompletedFetch(IsolationLevel.READ_COMMITTED,
-                OffsetResetStrategy.NONE,
-                true,
-                0,
+        CompletedFetch completedFetch = newCompletedFetch(0,

Review Comment:
   Could we merge the next line in? Ditto for similar places below.
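
A sketch of the suggested merge, assuming the elided next line of the diff passes the partitionData built just above (that second argument is an assumption; it is not shown in the excerpt):

   // Hypothetical merged form of the wrapped call; "partitionData" as the
   // second argument is assumed from the surrounding context, not taken from the PR.
   CompletedFetch completedFetch = newCompletedFetch(0, partitionData);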


##########
clients/src/test/java/org/apache/kafka/clients/consumer/internals/FetchBufferTest.java:
##########
@@ -0,0 +1,193 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.kafka.clients.consumer.internals;
+
+import org.apache.kafka.clients.consumer.ConsumerConfig;
+import org.apache.kafka.common.TopicPartition;
+import org.apache.kafka.common.message.FetchResponseData;
+import org.apache.kafka.common.metrics.Metrics;
+import org.apache.kafka.common.protocol.ApiKeys;
+import org.apache.kafka.common.serialization.StringSerializer;
+import org.apache.kafka.common.utils.BufferSupplier;
+import org.apache.kafka.common.utils.LogContext;
+import org.apache.kafka.common.utils.MockTime;
+import org.apache.kafka.common.utils.Time;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
+
+import java.util.Arrays;
+import java.util.HashSet;
+import java.util.Properties;
+import java.util.Set;
+
+import static org.apache.kafka.clients.consumer.internals.ConsumerUtils.createFetchMetricsManager;
+import static org.apache.kafka.clients.consumer.internals.ConsumerUtils.createMetrics;
+import static org.apache.kafka.clients.consumer.internals.ConsumerUtils.createSubscriptionState;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertNotNull;
+import static org.junit.jupiter.api.Assertions.assertNull;
+import static org.junit.jupiter.api.Assertions.assertSame;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+
+/**
+ * This tests the {@link FetchBuffer} functionality in addition to what {@link FetcherTest} tests during the course
+ * of its tests. One of the main concerns of these tests are that we correctly handle both places that data is held
+ * internally:
+ *
+ * <ol>
+ *     <li>A special "next in line" buffer</li>
+ *     <li>The remainder of the buffers in a queue</li>
+ * </ol>
+ */
+public class FetchBufferTest {
+
+    private final Time time = new MockTime(0, 0, 0);
+    private final TopicPartition topicAPartition0 = new TopicPartition("topic-a", 0);
+    private final TopicPartition topicAPartition1 = new TopicPartition("topic-a", 1);
+    private final TopicPartition topicAPartition2 = new TopicPartition("topic-a", 2);
+    private final Set<TopicPartition> allPartitions = partitions(topicAPartition0, topicAPartition1, topicAPartition2);
+    private LogContext logContext;
+
+    private SubscriptionState subscriptions;
+
+    private FetchMetricsManager metricsManager;
+
+    @BeforeEach
+    public void setup() {
+        logContext = new LogContext();
+
+        Properties p = new Properties();
+        p.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
+        p.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
+        p.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
+        ConsumerConfig config = new ConsumerConfig(p);
+
+        subscriptions = createSubscriptionState(config, logContext);
+
+        Metrics metrics = createMetrics(config, time);
+        metricsManager = createFetchMetricsManager(metrics);
+    }
+
+    /**
+     * Verifies the basics: we can add buffered data to the queue, peek to view them, and poll to remove them.
+     */
+    @Test
+    public void testBasicPeekAndPoll() {
+        try (FetchBuffer fetchBuffer = new FetchBuffer(logContext)) {
+            CompletedFetch completedFetch = completedFetch(topicAPartition0);
+            assertTrue(fetchBuffer.isEmpty());
+            fetchBuffer.add(completedFetch);
+            assertTrue(fetchBuffer.hasCompletedFetches(p -> true));
+            assertFalse(fetchBuffer.isEmpty());
+            assertNotNull(fetchBuffer.peek());
+            assertSame(completedFetch, fetchBuffer.peek());
+            assertSame(completedFetch, fetchBuffer.poll());
+            assertNull(fetchBuffer.peek());
+        }
+    }
+
+    /**
+     * Verifies {@link FetchBuffer#close()}} closes the buffered data for both the queue and the next-in-line buffer.
+     */
+    @Test
+    public void testCloseClearsData() {
+        FetchBuffer fetchBuffer = null;
+
+        try {
+            fetchBuffer = new FetchBuffer(logContext);
+            assertNull(fetchBuffer.nextInLineFetch());
+            assertTrue(fetchBuffer.isEmpty());
+
+            fetchBuffer.add(completedFetch(topicAPartition0));
+            assertFalse(fetchBuffer.isEmpty());
+
+            fetchBuffer.setNextInLineFetch(completedFetch(topicAPartition0));
+            assertNotNull(fetchBuffer.nextInLineFetch());
+        } finally {
+            if (fetchBuffer != null)
+                fetchBuffer.close();
+        }
+
+        assertNull(fetchBuffer.nextInLineFetch());
+        assertTrue(fetchBuffer.isEmpty());
+    }
+
+    /**
+     * Tests that the buffer returns partitions for both the queue and the next-in-line buffer.
+     */
+    @Test
+    public void testPartitions() {

Review Comment:
   testBufferedPartitions?


##########
clients/src/main/java/org/apache/kafka/common/internals/IdempotentCloser.java:
##########
@@ -0,0 +1,127 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.kafka.common.internals;
+
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.function.Supplier;
+
+/**
+ * {@code IdempotentCloser} encapsulates some basic logic to ensure that a given resource is only closed once.
+ * It provides methods to invoke callbacks (via optional {@link Runnable}s) for either the <em>initial</em> close
+ * and/or any <em>subsequent</em> closes.
+ *
+ * <p/>
+ *
+ * Here's an example:
+ *
+ * <pre>
+ *
+ * public class MyDataFile implements Closeable {
+ *
+ *     private final IdempotentCloser closer = new IdempotentCloser();
+ *
+ *     private final File file;
+ *
+ *     . . .
+ *
+ *     public boolean write() {
+ *         closer.maybeThrowIllegalStateException(() -> String.format("Data file %s already closed!", file));
+ *         writeToFile();
+ *     }
+ *
+ *
+ *     public boolean isClosed() {
+ *         return closer.isClosed();
+ *     }
+ *
+ *     @Override
+ *     public void close() {
+ *         Runnable onInitialClose = () -> {
+ *             cleanUpFile(file);
+ *             log.debug("Data file {} closed", file);
+ *         };
+ *         Runnable onSubsequentClose = () -> {
+ *             log.warn("Data file {} already closed!", file);
+ *         };
+ *         closer.close(onInitialClose, onSubsequentClose);
+ *     }
+ * }
+ * </pre>
+ *
+ * Note that the callbacks are optional and if unused operates as a simple means to ensure resources
+ * are only closed once.
+ */
+public class IdempotentCloser implements AutoCloseable {
+
+    private final AtomicBoolean isClosed;
+
+    /**
+     * Creates an {@code IdempotentCloser} that is not yet closed.
+     */
+    public IdempotentCloser() {
+        this(false);
+    }
+
+    /**
+     * Creates an {@code IdempotentCloser} with the given initial state.
+     *
+     * @param isClosed Initial value for underlying state
+     */
+    public IdempotentCloser(boolean isClosed) {
+        this.isClosed = new AtomicBoolean(isClosed);
+    }
+
+    public void maybeThrowIllegalStateException(Supplier<String> message) {

Review Comment:
   Could we describe what the method does?
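
One possible answer, sketched only: the usage example in the class Javadoc suggests the method throws an IllegalStateException built from the supplied message when the closer is already closed, and otherwise does nothing. Assuming that is the intended contract, the description could read roughly as follows (not the PR's final wording):

   /**
    * Assumed contract, for illustration: throws an IllegalStateException with the
    * message produced by the given {@link Supplier} if this closer was already
    * closed; otherwise this is a no-op.
    *
    * @param message Supplier that lazily produces the exception message
    * @throws IllegalStateException if {@link #close()} was already invoked
    */
   public void maybeThrowIllegalStateException(Supplier<String> message) {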


##########
clients/src/test/java/org/apache/kafka/clients/consumer/internals/FetchBufferTest.java:
##########
@@ -0,0 +1,193 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.kafka.clients.consumer.internals;
+
+import org.apache.kafka.clients.consumer.ConsumerConfig;
+import org.apache.kafka.common.TopicPartition;
+import org.apache.kafka.common.message.FetchResponseData;
+import org.apache.kafka.common.metrics.Metrics;
+import org.apache.kafka.common.protocol.ApiKeys;
+import org.apache.kafka.common.serialization.StringSerializer;
+import org.apache.kafka.common.utils.BufferSupplier;
+import org.apache.kafka.common.utils.LogContext;
+import org.apache.kafka.common.utils.MockTime;
+import org.apache.kafka.common.utils.Time;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
+
+import java.util.Arrays;
+import java.util.HashSet;
+import java.util.Properties;
+import java.util.Set;
+
+import static org.apache.kafka.clients.consumer.internals.ConsumerUtils.createFetchMetricsManager;
+import static org.apache.kafka.clients.consumer.internals.ConsumerUtils.createMetrics;
+import static org.apache.kafka.clients.consumer.internals.ConsumerUtils.createSubscriptionState;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertNotNull;
+import static org.junit.jupiter.api.Assertions.assertNull;
+import static org.junit.jupiter.api.Assertions.assertSame;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+
+/**
+ * This tests the {@link FetchBuffer} functionality in addition to what {@link FetcherTest} tests during the course
+ * of its tests. One of the main concerns of these tests are that we correctly handle both places that data is held
+ * internally:
+ *
+ * <ol>
+ *     <li>A special "next in line" buffer</li>

Review Comment:
   Are these comments relevant to this class?


##########
clients/src/main/java/org/apache/kafka/clients/consumer/internals/AbstractFetch.java:
##########
@@ -421,14 +309,45 @@ Node selectReadReplica(final TopicPartition partition, final Node leaderReplica,
                         " using the leader instead.", nodeId, partition);
                 // Note that this condition may happen due to stale metadata, so we clear preferred replica and
                 // refresh metadata.
-                requestMetadataUpdate(partition);
+                requestMetadataUpdate(metadata, subscriptions, partition);
                 return leaderReplica;
             }
         } else {
             return leaderReplica;
         }
     }

+    protected Map<Node, FetchSessionHandler.FetchRequestData> prepareCloseFetchSessionRequests() {

Review Comment:
   This is only used inside AbstractFetch. Could this be private?


-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: jira-unsubscr...@kafka.apache.org

For queries about this service, please contact Infrastructure at:
us...@infra.apache.org