cadonna commented on code in PR #14193:
URL: https://github.com/apache/kafka/pull/14193#discussion_r1363559332


##########
streams/src/test/java/org/apache/kafka/streams/processor/internals/SynchronizedPartitionGroupTest.java:
##########
@@ -0,0 +1,200 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.kafka.streams.processor.internals;
+import org.apache.kafka.clients.consumer.ConsumerRecord;

Review Comment:
   ```suggestion
   
   import org.apache.kafka.clients.consumer.ConsumerRecord;
   ```



##########
streams/src/test/java/org/apache/kafka/streams/integration/NamedTopologyIntegrationTest.java:
##########
@@ -676,7 +677,7 @@ public void 
shouldAddToEmptyInitialTopologyRemoveResetOffsetsThenAddSameNamedTop
         }
     }
 
-    @Test
+    @Ignore

Review Comment:
   Could you add a reason why you want to ignore this test?



##########
streams/src/test/java/org/apache/kafka/streams/processor/internals/SynchronizedPartitionGroupTest.java:
##########
@@ -0,0 +1,200 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.kafka.streams.processor.internals;
+import org.apache.kafka.clients.consumer.ConsumerRecord;
+import org.apache.kafka.common.TopicPartition;
+import 
org.apache.kafka.streams.processor.internals.AbstractPartitionGroup.RecordInfo;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;

Review Comment:
   Could you please use JUnit 5 for new tests?
   
   You need to replace the three lines above with:
   ```
   import org.junit.jupiter.api.AfterEach;
   import org.junit.jupiter.api.BeforeEach;
   import org.junit.jupiter.api.Test;
   ``` 



##########
streams/src/test/java/org/apache/kafka/streams/processor/internals/SynchronizedPartitionGroupTest.java:
##########
@@ -0,0 +1,200 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.kafka.streams.processor.internals;
+import org.apache.kafka.clients.consumer.ConsumerRecord;
+import org.apache.kafka.common.TopicPartition;
+import 
org.apache.kafka.streams.processor.internals.AbstractPartitionGroup.RecordInfo;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+import org.mockito.Mock;
+import org.mockito.MockitoAnnotations;
+import java.util.Collections;
+import java.util.Set;
+import java.util.function.Function;
+
+import static org.junit.Assert.assertEquals;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.times;
+
+public class SynchronizedPartitionGroupTest {
+
+    @Mock
+    private AbstractPartitionGroup wrapped;
+
+    private SynchronizedPartitionGroup synchronizedPartitionGroup;
+
+    private AutoCloseable closeable;
+
+    @Before
+    public void setUp() {
+        closeable = MockitoAnnotations.openMocks(this);
+        synchronizedPartitionGroup = new SynchronizedPartitionGroup(wrapped);
+    }
+
+    @After
+    public void tearDown() throws Exception {
+        closeable.close();
+    }
+
+    @Test
+    public void testReadyToProcess() {
+        final long wallClockTime = 0L;
+        when(wrapped.readyToProcess(wallClockTime)).thenReturn(true);
+
+        synchronizedPartitionGroup.readyToProcess(wallClockTime);
+
+        verify(wrapped, times(1)).readyToProcess(wallClockTime);
+    }
+
+    @Test
+    public void testUpdatePartitions() {
+        final Set<TopicPartition> inputPartitions = Collections.singleton(new 
TopicPartition("topic", 0));
+        @SuppressWarnings("unchecked") final Function<TopicPartition, 
RecordQueue> recordQueueCreator = (Function<TopicPartition, RecordQueue>) 
mock(Function.class);
+
+        synchronizedPartitionGroup.updatePartitions(inputPartitions, 
recordQueueCreator);
+
+        verify(wrapped, times(1)).updatePartitions(inputPartitions, 
recordQueueCreator);
+    }
+
+    @Test
+    public void testSetPartitionTime() {
+        final TopicPartition partition = new TopicPartition("topic", 0);
+        final long partitionTime = System.currentTimeMillis();

Review Comment:
   nit:
   You could also just set this to a fixed long literal here and in the other 
places instead of calling `System.currentTimeMillis()`. That keeps the test 
deterministic and may save a little execution time. 



##########
streams/src/test/java/org/apache/kafka/streams/processor/internals/TaskManagerTest.java:
##########
@@ -286,6 +295,103 @@ private void 
updateExistingStandbyTaskIfStandbyIsReassignedWithoutStateUpdater(f
         Mockito.verify(standbyTask).resume();
     }
 
+    @Test
+    public void shouldLockAllTasksOnCorruptionWithProcessingThreads() {
+        final StreamTask activeTask1 = statefulTask(taskId00, 
taskId00ChangelogPartitions)
+            .inState(State.RUNNING)
+            .withInputPartitions(taskId00Partitions).build();
+        final TasksRegistry tasks = Mockito.mock(TasksRegistry.class);
+        final TaskManager taskManager = 
setUpTaskManager(ProcessingMode.AT_LEAST_ONCE, tasks, true, true);
+        when(tasks.activeTaskIds()).thenReturn(mkSet(taskId00, taskId01));
+        when(tasks.task(taskId00)).thenReturn(activeTask1);
+        final KafkaFuture<Void> mockFuture = KafkaFuture.completedFuture(null);
+        when(schedulingTaskManager.lockTasks(any())).thenReturn(mockFuture);
+        expect(consumer.assignment()).andReturn(emptySet()).anyTimes();
+        replay(consumer);
+
+        taskManager.handleCorruption(mkSet(taskId00));
+
+        verify(consumer);
+        Mockito.verify(schedulingTaskManager).lockTasks(mkSet(taskId00, 
taskId01));
+        Mockito.verify(schedulingTaskManager).unlockTasks(mkSet(taskId00, 
taskId01));
+    }
+
+    @Test
+    public void shouldLockCommitableTasksOnCorruptionWithProcessingThreads() {
+        final StreamTask activeTask1 = statefulTask(taskId00, 
taskId00ChangelogPartitions)
+            .inState(State.RUNNING)
+            .withInputPartitions(taskId00Partitions).build();
+        final StreamTask activeTask2 = statefulTask(taskId01, 
taskId01ChangelogPartitions)
+            .inState(State.RUNNING)
+            .withInputPartitions(taskId01Partitions).build();

Review Comment:
   Why do you use two tasks here but only pass one of them to the commit method? 
   I would pass both to the commit method to verify that the commit can 
handle multiple tasks.



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: jira-unsubscr...@kafka.apache.org

For queries about this service, please contact Infrastructure at:
us...@infra.apache.org

Reply via email to