[GitHub] [kafka] jolshan commented on a diff in pull request #13795: KAFKA-14462; [17/N] Add CoordinatorRuntime

2023-06-09 Thread via GitHub


jolshan commented on code in PR #13795:
URL: https://github.com/apache/kafka/pull/13795#discussion_r1224575011


##
group-coordinator/src/test/java/org/apache/kafka/coordinator/group/runtime/CoordinatorRuntimeTest.java:
##
@@ -0,0 +1,803 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.kafka.coordinator.group.runtime;
+
+import org.apache.kafka.common.KafkaException;
+import org.apache.kafka.common.TopicPartition;
+import org.apache.kafka.common.errors.NotCoordinatorException;
+import org.apache.kafka.timeline.SnapshotRegistry;
+import org.apache.kafka.timeline.TimelineHashSet;
+import org.junit.jupiter.api.Test;
+
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Objects;
+import java.util.Set;
+import java.util.concurrent.CompletableFuture;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.RejectedExecutionException;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.TimeoutException;
+
+import static org.apache.kafka.common.utils.Utils.mkSet;
+import static org.apache.kafka.test.TestUtils.assertFutureThrows;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertThrows;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+import static org.mockito.ArgumentMatchers.any;
+import static org.mockito.ArgumentMatchers.eq;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.times;
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.when;
+
+public class CoordinatorRuntimeTest {
+    private static final TopicPartition TP = new TopicPartition("__consumer_offsets", 0);
+
+    /**
+     * A CoordinatorEventProcessor that directly executes the operations. This is
+     * useful in unit tests where execution in threads is not required.
+     */
+    private static class MockEventProcessor implements CoordinatorEventProcessor {
+        @Override
+        public void enqueue(CoordinatorEvent event) throws RejectedExecutionException {
+            try {
+                event.run();
+            } catch (Throwable ex) {
+                event.complete(ex);
+            }
+        }
+
+        @Override
+        public void close() throws Exception {}
+    }
+
+    /**
+     * A CoordinatorLoader that always succeeds.
+     */
+    private static class MockCoordinatorLoader implements CoordinatorLoader {
+        @Override
+        public CompletableFuture load(TopicPartition tp, CoordinatorPlayback replayable) {
+            return CompletableFuture.completedFuture(null);
+        }
+    }
+
+    /**
+     * An in-memory partition writer that accepts a maximum number of writes.
+     */
+    private static class MockPartitionWriter extends InMemoryPartitionWriter {
+        private int allowedWrites = 1;
+
+        public MockPartitionWriter() {
+            this(Integer.MAX_VALUE);
+        }
+
+        public MockPartitionWriter(int allowedWrites) {
+            super(false);
+            this.allowedWrites = allowedWrites;
+        }
+
+        @Override
+        public void registerListener(TopicPartition tp, Listener listener) {
+            super.registerListener(tp, listener);
+        }
+
+        @Override
+        public void deregisterListener(TopicPartition tp, Listener listener) {
+            super.deregisterListener(tp, listener);
+        }
+
+        @Override
+        public long append(TopicPartition tp, List records) throws KafkaException {
+            if (allowedWrites-- > 0) {
+                return super.append(tp, records);
+            } else {
+                throw new KafkaException("append failed.");
+            }
+        }
+    }
+
+    /**
+     * A simple Coordinator implementation that stores the records into a set.
+     */
+    private static class MockCoordinator implements Coordinator {
+        private final TimelineHashSet records;
+
+        MockCoordinator(
+            SnapshotRegistry snapshotRegistry
+        ) {
+            records = new 

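The MockPartitionWriter above enforces a write budget: each successful append decrements allowedWrites, and any append past the budget throws. A minimal usage sketch, assuming the writer handles String records as the MockCoordinator's record set suggests; the calls and assertions below are illustrative only, not taken from the PR:

    // Illustrative only: a writer that accepts exactly one append.
    MockPartitionWriter writer = new MockPartitionWriter(1);
    writer.append(TP, Collections.singletonList("record-1"));            // consumes the single allowed write
    assertThrows(KafkaException.class,
        () -> writer.append(TP, Collections.singletonList("record-2"))); // budget exhausted, append fails
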
[GitHub] [kafka] jolshan commented on a diff in pull request #13795: KAFKA-14462; [17/N] Add CoordinatorRuntime

2023-06-09 Thread via GitHub


jolshan commented on code in PR #13795:
URL: https://github.com/apache/kafka/pull/13795#discussion_r1224571907



[GitHub] [kafka] jolshan commented on a diff in pull request #13795: KAFKA-14462; [17/N] Add CoordinatorRuntime

2023-06-09 Thread via GitHub


jolshan commented on code in PR #13795:
URL: https://github.com/apache/kafka/pull/13795#discussion_r1224569985



[GitHub] [kafka] jolshan commented on a diff in pull request #13795: KAFKA-14462; [17/N] Add CoordinatorRuntime

2023-06-09 Thread via GitHub


jolshan commented on code in PR #13795:
URL: https://github.com/apache/kafka/pull/13795#discussion_r1224568806



[GitHub] [kafka] jolshan commented on a diff in pull request #13795: KAFKA-14462; [17/N] Add CoordinatorRuntime

2023-06-07 Thread via GitHub


jolshan commented on code in PR #13795:
URL: https://github.com/apache/kafka/pull/13795#discussion_r102869



[GitHub] [kafka] jolshan commented on a diff in pull request #13795: KAFKA-14462; [17/N] Add CoordinatorRuntime

2023-06-07 Thread via GitHub


jolshan commented on code in PR #13795:
URL: https://github.com/apache/kafka/pull/13795#discussion_r1222198170



[GitHub] [kafka] jolshan commented on a diff in pull request #13795: KAFKA-14462; [17/N] Add CoordinatorRuntime

2023-06-07 Thread via GitHub


jolshan commented on code in PR #13795:
URL: https://github.com/apache/kafka/pull/13795#discussion_r1222177376



[GitHub] [kafka] jolshan commented on a diff in pull request #13795: KAFKA-14462; [17/N] Add CoordinatorRuntime

2023-06-07 Thread via GitHub


jolshan commented on code in PR #13795:
URL: https://github.com/apache/kafka/pull/13795#discussion_r1222175645



[GitHub] [kafka] jolshan commented on a diff in pull request #13795: KAFKA-14462; [17/N] Add CoordinatorRuntime

2023-06-07 Thread via GitHub


jolshan commented on code in PR #13795:
URL: https://github.com/apache/kafka/pull/13795#discussion_r1222169738



[GitHub] [kafka] jolshan commented on a diff in pull request #13795: KAFKA-14462; [17/N] Add CoordinatorRuntime

2023-06-07 Thread via GitHub


jolshan commented on code in PR #13795:
URL: https://github.com/apache/kafka/pull/13795#discussion_r1222158858



[GitHub] [kafka] jolshan commented on a diff in pull request #13795: KAFKA-14462; [17/N] Add CoordinatorRuntime

2023-06-07 Thread via GitHub


jolshan commented on code in PR #13795:
URL: https://github.com/apache/kafka/pull/13795#discussion_r1222148781



[GitHub] [kafka] jolshan commented on a diff in pull request #13795: KAFKA-14462; [17/N] Add CoordinatorRuntime

2023-06-07 Thread via GitHub


jolshan commented on code in PR #13795:
URL: https://github.com/apache/kafka/pull/13795#discussion_r1222140560



[GitHub] [kafka] jolshan commented on a diff in pull request #13795: KAFKA-14462; [17/N] Add CoordinatorRuntime

2023-06-07 Thread via GitHub


jolshan commented on code in PR #13795:
URL: https://github.com/apache/kafka/pull/13795#discussion_r1222117410


##
group-coordinator/src/test/java/org/apache/kafka/coordinator/group/runtime/CoordinatorRuntimeTest.java:
##
@@ -0,0 +1,803 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ *http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.kafka.coordinator.group.runtime;
+
+import org.apache.kafka.common.KafkaException;
+import org.apache.kafka.common.TopicPartition;
+import org.apache.kafka.common.errors.NotCoordinatorException;
+import org.apache.kafka.timeline.SnapshotRegistry;
+import org.apache.kafka.timeline.TimelineHashSet;
+import org.junit.jupiter.api.Test;
+
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Objects;
+import java.util.Set;
+import java.util.concurrent.CompletableFuture;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.RejectedExecutionException;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.TimeoutException;
+
+import static org.apache.kafka.common.utils.Utils.mkSet;
+import static org.apache.kafka.test.TestUtils.assertFutureThrows;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertThrows;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+import static org.mockito.ArgumentMatchers.any;
+import static org.mockito.ArgumentMatchers.eq;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.times;
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.when;
+
+public class CoordinatorRuntimeTest {
+private static final TopicPartition TP = new 
TopicPartition("__consumer_offsets", 0);
+
+/**
+ * A CoordinatorEventProcessor that directly executes the operations. This is
+ * useful in unit tests where execution in threads is not required.
+ */
+private static class MockEventProcessor implements 
CoordinatorEventProcessor {
+@Override
+public void enqueue(CoordinatorEvent event) throws 
RejectedExecutionException {
+try {
+event.run();
+} catch (Throwable ex) {
+event.complete(ex);
+}
+}
+
+@Override
+public void close() throws Exception {}
+}
+
+/**
+ * A CoordinatorLoader that always succeeds.
+ */
+private static class MockCoordinatorLoader implements 
CoordinatorLoader {
+@Override
+public CompletableFuture load(TopicPartition tp, 
CoordinatorPlayback replayable) {
+return CompletableFuture.completedFuture(null);
+}
+}
+
+/**
+ * An in-memory partition writer that accepts a maximum number of writes.
+ */
+private static class MockPartitionWriter extends 
InMemoryPartitionWriter {
+private int allowedWrites = 1;

Review Comment:
   nit: we don't need to initialize this to 1, right? It will not be used, since both constructors assign allowedWrites.
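   As a minimal, self-contained sketch of that suggestion (dropping the InMemoryPartitionWriter superclass and the real append() signature for brevity), the field can be left without an initializer because every constructor assigns it:

       class MockPartitionWriter {
           private int allowedWrites;          // no "= 1" needed; the constructors always set it

           MockPartitionWriter() {
               this(Integer.MAX_VALUE);        // default: effectively unlimited writes
           }

           MockPartitionWriter(int allowedWrites) {
               this.allowedWrites = allowedWrites;
           }

           boolean tryWrite() {
               return allowedWrites-- > 0;     // same gating logic as append() in the test above
           }
       }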






[GitHub] [kafka] jolshan commented on a diff in pull request #13795: KAFKA-14462; [17/N] Add CoordinatorRuntime

2023-06-07 Thread via GitHub


jolshan commented on code in PR #13795:
URL: https://github.com/apache/kafka/pull/13795#discussion_r1222115234


##
group-coordinator/src/main/java/org/apache/kafka/coordinator/group/runtime/MultiThreadedEventProcessor.java:
##
@@ -39,7 +40,7 @@ public class MultiThreadedEventProcessor implements 
CoordinatorEventProcessor {
 /**
  * The accumulator.
  */
-private final EventAccumulator accumulator;

Review Comment:
   Is this changed from Integer to TopicPartition so that we can use different coordinator state partitions (i.e. consumer offsets vs. transactional state)?
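   If so, a hypothetical sketch of the idea (this is not the real EventAccumulator API, just an illustration): keying events by TopicPartition lets a single accumulator serve any coordinator state topic, e.g. __consumer_offsets or __transaction_state, while still keeping events for the same partition grouped and ordered.

       import org.apache.kafka.common.TopicPartition;

       import java.util.ArrayDeque;
       import java.util.HashMap;
       import java.util.Map;
       import java.util.Queue;

       class KeyedEventQueue<K, T> {
           // One FIFO queue per key so events for the same partition stay ordered.
           private final Map<K, Queue<T>> queues = new HashMap<>();

           void add(K key, T event) {
               queues.computeIfAbsent(key, k -> new ArrayDeque<>()).add(event);
           }

           T poll(K key) {
               Queue<T> queue = queues.get(key);
               return queue == null ? null : queue.poll();
           }
       }

       // Usage: KeyedEventQueue<TopicPartition, Runnable> queue = new KeyedEventQueue<>();
       // queue.add(new TopicPartition("__consumer_offsets", 0), () -> { });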






[GitHub] [kafka] jolshan commented on a diff in pull request #13795: KAFKA-14462; [17/N] Add CoordinatorRuntime

2023-06-07 Thread via GitHub


jolshan commented on code in PR #13795:
URL: https://github.com/apache/kafka/pull/13795#discussion_r1222035271


##
group-coordinator/src/main/java/org/apache/kafka/coordinator/group/runtime/CoordinatorEvent.java:
##
@@ -16,21 +16,23 @@
  */
 package org.apache.kafka.coordinator.group.runtime;
 
+import org.apache.kafka.common.TopicPartition;
+
 /**
  * The base event type used by all events processed in the
  * coordinator runtime.
  */
-public interface CoordinatorEvent extends EventAccumulator.Event {
+public interface CoordinatorEvent extends 
EventAccumulator.Event {
 
 /**
- * Runs the event.
+ * Executes the event.
  */
 void run();
 
 /**
  * Completes the event with the provided exception.
  *
- * @param exception An exception to complete the event with.
+ * @param exception An exception if the processing of the event failed or 
null.

Review Comment:
   nit: maybe "An exception if the processing of the event failed or null 
otherwise"
   I read this as exception if the event failed or was null. 
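   Applied to the interface method, the suggested wording might read as follows (a sketch only; complete(Throwable) matches the event.complete(ex) calls in the mocks above):

       /**
        * Completes the event with the provided exception.
        *
        * @param exception An exception if the processing of the event failed, or null otherwise.
        */
       void complete(Throwable exception);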






[GitHub] [kafka] jolshan commented on a diff in pull request #13795: KAFKA-14462; [17/N] Add CoordinatorRuntime

2023-06-07 Thread via GitHub


jolshan commented on code in PR #13795:
URL: https://github.com/apache/kafka/pull/13795#discussion_r1222033908


##
group-coordinator/src/main/java/org/apache/kafka/coordinator/group/runtime/CoordinatorRuntime.java:
##
@@ -0,0 +1,1009 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ *http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.kafka.coordinator.group.runtime;
+
+import org.apache.kafka.common.KafkaException;
+import org.apache.kafka.common.TopicPartition;
+import org.apache.kafka.common.errors.CoordinatorLoadInProgressException;
+import org.apache.kafka.common.errors.NotCoordinatorException;
+import org.apache.kafka.common.protocol.Errors;
+import org.apache.kafka.common.utils.LogContext;
+import org.apache.kafka.deferred.DeferredEvent;
+import org.apache.kafka.deferred.DeferredEventQueue;
+import org.apache.kafka.timeline.SnapshotRegistry;
+import org.slf4j.Logger;
+
+import java.util.HashSet;
+import java.util.OptionalLong;
+import java.util.Set;
+import java.util.concurrent.CompletableFuture;
+import java.util.concurrent.ConcurrentHashMap;
+
+/**
+ * The CoordinatorRuntime provides a framework to implement coordinators such 
as the group coordinator
+ * or the transaction coordinator.
+ *
+ * The runtime framework maps each underlying partition (e.g. __consumer_offsets) that the broker is a
+ * leader of to a coordinator replicated state machine. A replicated state machine holds the hard and soft
+ * state of all the objects (e.g. groups or offsets) assigned to the partition. The hard state is stored in
+ * timeline data structures backed by a SnapshotRegistry. The runtime supports two types of operations
+ * on state machines: (1) Writes and (2) Reads.
+ *
+ * (1) A write operation, aka a request, can read the full and potentially **uncommitted** state from the state
+ * machine to handle the operation. A write operation typically generates a response and a list of
+ * records. The records are applied to the state machine and persisted to the partition. The response
+ * is parked until the records are committed and is delivered when they are.
+ *
+ * (2) A read operation, aka a request, can only read the committed state from the state machine to handle
+ * the operation. A read operation typically generates a response that is immediately completed.
+ *
+ * The runtime framework exposes an asynchronous, future-based API to the world. All the operations
+ * are executed by a CoordinatorEventProcessor. The processor guarantees that operations for a
+ * single partition or state machine are not processed concurrently.
+ *
+ * @param  The type of the state machine.
+ * @param  The type of the record.
+ */
+public class CoordinatorRuntime, U> {
+
+/**
+ * Builder to create a CoordinatorRuntime.
+ *
+ * @param  The type of the state machine.
+ * @param  The type of the record.
+ */
+public static class Builder, U> {
+private LogContext logContext;
+private CoordinatorEventProcessor eventProcessor;
+private PartitionWriter partitionWriter;
+private CoordinatorLoader loader;
+private CoordinatorBuilderSupplier coordinatorBuilderSupplier;
+
+public Builder withLogContext(LogContext logContext) {
+this.logContext = logContext;
+return this;
+}
+
+public Builder withEventProcessor(CoordinatorEventProcessor 
eventProcessor) {
+this.eventProcessor = eventProcessor;
+return this;
+}
+
+public Builder withPartitionWriter(PartitionWriter 
partitionWriter) {
+this.partitionWriter = partitionWriter;
+return this;
+}
+
+public Builder withLoader(CoordinatorLoader loader) {
+this.loader = loader;
+return this;
+}
+
+public Builder 
withCoordinatorStateMachineSupplier(CoordinatorBuilderSupplier 
coordinatorBuilderSupplier) {
+this.coordinatorBuilderSupplier = coordinatorBuilderSupplier;
+return this;
+}
+
+public CoordinatorRuntime build() {
+if (logContext == null)
+logContext = new LogContext();
+if (eventProcessor == null)
+throw new 
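
A hedged sketch of how this Builder might be wired together, using the mock dependencies from CoordinatorRuntimeTest quoted earlier. The record type (String here) and MockCoordinatorBuilderSupplier are assumptions for illustration only, and the supplier method name differs between revisions of this PR (withCoordinatorStateMachineSupplier vs withCoordinatorBuilderSupplier):

    CoordinatorRuntime<MockCoordinator, String> runtime =
        new CoordinatorRuntime.Builder<MockCoordinator, String>()
            .withLogContext(new LogContext("[test] "))      // optional; build() falls back to a default LogContext
            .withEventProcessor(new MockEventProcessor())   // executes events inline
            .withPartitionWriter(new MockPartitionWriter()) // in-memory log
            .withLoader(new MockCoordinatorLoader())        // loading always succeeds
            .withCoordinatorBuilderSupplier(new MockCoordinatorBuilderSupplier()) // hypothetical supplier of MockCoordinator builders
            .build();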

[GitHub] [kafka] jolshan commented on a diff in pull request #13795: KAFKA-14462; [17/N] Add CoordinatorRuntime

2023-06-07 Thread via GitHub


jolshan commented on code in PR #13795:
URL: https://github.com/apache/kafka/pull/13795#discussion_r1222029897


##
group-coordinator/src/main/java/org/apache/kafka/coordinator/group/runtime/CoordinatorRuntime.java:
##
@@ -0,0 +1,1040 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ *http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.kafka.coordinator.group.runtime;
+
+import org.apache.kafka.common.KafkaException;
+import org.apache.kafka.common.TopicPartition;
+import org.apache.kafka.common.errors.CoordinatorLoadInProgressException;
+import org.apache.kafka.common.errors.NotCoordinatorException;
+import org.apache.kafka.common.protocol.Errors;
+import org.apache.kafka.common.utils.LogContext;
+import org.apache.kafka.deferred.DeferredEvent;
+import org.apache.kafka.deferred.DeferredEventQueue;
+import org.apache.kafka.timeline.SnapshotRegistry;
+import org.slf4j.Logger;
+
+import java.util.HashSet;
+import java.util.OptionalLong;
+import java.util.Set;
+import java.util.concurrent.CompletableFuture;
+import java.util.concurrent.ConcurrentHashMap;
+
+/**
+ * The CoordinatorRuntime provides a framework to implement coordinators such 
as the group coordinator
+ * or the transaction coordinator.
+ *
+ * The runtime framework maps each underlying partition (e.g. __consumer_offsets) that the broker is a
+ * leader of to a coordinator replicated state machine. A replicated state machine holds the hard and soft
+ * state of all the objects (e.g. groups or offsets) assigned to the partition. The hard state is stored in
+ * timeline data structures backed by a SnapshotRegistry. The runtime supports two types of operations
+ * on state machines: (1) Writes and (2) Reads.
+ *
+ * (1) A write operation, aka a request, can read the full and potentially **uncommitted** state from the state
+ * machine to handle the operation. A write operation typically generates a response and a list of
+ * records. The records are applied to the state machine and persisted to the partition. The response
+ * is parked until the records are committed and is delivered when they are.
+ *
+ * (2) A read operation, aka a request, can only read the committed state from the state machine to handle
+ * the operation. A read operation typically generates a response that is immediately completed.
+ *
+ * The runtime framework exposes an asynchronous, future-based API to the world. All the operations
+ * are executed by a CoordinatorEventProcessor. The processor guarantees that operations for a
+ * single partition or state machine are not processed concurrently.
+ *
+ * @param  The type of the state machine.
+ * @param  The type of the record.
+ */
+public class CoordinatorRuntime, U> {
+
+/**
+ * Builder to create a CoordinatorRuntime.
+ *
+ * @param  The type of the state machine.
+ * @param  The type of the record.
+ */
+public static class Builder, U> {
+private LogContext logContext;
+private CoordinatorEventProcessor eventProcessor;
+private PartitionWriter partitionWriter;
+private CoordinatorLoader loader;
+private CoordinatorBuilderSupplier coordinatorBuilderSupplier;
+
+public Builder withLogContext(LogContext logContext) {
+this.logContext = logContext;
+return this;
+}
+
+public Builder withEventProcessor(CoordinatorEventProcessor 
eventProcessor) {
+this.eventProcessor = eventProcessor;
+return this;
+}
+
+public Builder withPartitionWriter(PartitionWriter 
partitionWriter) {
+this.partitionWriter = partitionWriter;
+return this;
+}
+
+public Builder withLoader(CoordinatorLoader loader) {
+this.loader = loader;
+return this;
+}
+
+public Builder 
withCoordinatorBuilderSupplier(CoordinatorBuilderSupplier 
coordinatorBuilderSupplier) {
+this.coordinatorBuilderSupplier = coordinatorBuilderSupplier;
+return this;
+}
+
+public CoordinatorRuntime build() {
+if (logContext == null)
+logContext = new LogContext();
+if (eventProcessor == null)
+throw new 
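
A hypothetical, self-contained sketch of the write-operation semantics described in the javadoc above (not the CoordinatorRuntime implementation): the records produced by a write are handed to the partition writer, and the response future is only completed once those records are reported as committed.

    import java.util.List;
    import java.util.concurrent.CompletableFuture;

    class ParkedWriteResponse<T> {
        private final List<String> records;                       // records generated by the write operation
        private final CompletableFuture<T> response = new CompletableFuture<>();

        ParkedWriteResponse(List<String> records) {
            this.records = records;
        }

        List<String> recordsToAppend() {
            return records;                                       // what the partition writer appends to the log
        }

        // Invoked once the high watermark has passed the offset of the appended records.
        void completeWith(T result) {
            response.complete(result);
        }

        CompletableFuture<T> future() {
            return response;
        }
    }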

[GitHub] [kafka] jolshan commented on a diff in pull request #13795: KAFKA-14462; [17/N] Add CoordinatorRuntime

2023-06-06 Thread via GitHub


jolshan commented on code in PR #13795:
URL: https://github.com/apache/kafka/pull/13795#discussion_r1220560961


##
group-coordinator/src/main/java/org/apache/kafka/coordinator/group/runtime/CoordinatorRuntime.java:
##
@@ -0,0 +1,1040 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ *http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.kafka.coordinator.group.runtime;
+
+import org.apache.kafka.common.KafkaException;
+import org.apache.kafka.common.TopicPartition;
+import org.apache.kafka.common.errors.CoordinatorLoadInProgressException;
+import org.apache.kafka.common.errors.NotCoordinatorException;
+import org.apache.kafka.common.protocol.Errors;
+import org.apache.kafka.common.utils.LogContext;
+import org.apache.kafka.deferred.DeferredEvent;
+import org.apache.kafka.deferred.DeferredEventQueue;
+import org.apache.kafka.timeline.SnapshotRegistry;
+import org.slf4j.Logger;
+
+import java.util.HashSet;
+import java.util.OptionalLong;
+import java.util.Set;
+import java.util.concurrent.CompletableFuture;
+import java.util.concurrent.ConcurrentHashMap;
+
+/**
+ * The CoordinatorRuntime provides a framework to implement coordinators such 
as the group coordinator
+ * or the transaction coordinator.
+ *
+ * The runtime framework maps each underlying partition (e.g. __consumer_offsets) that the broker is a
+ * leader of to a coordinator replicated state machine. A replicated state machine holds the hard and soft
+ * state of all the objects (e.g. groups or offsets) assigned to the partition. The hard state is stored in
+ * timeline data structures backed by a SnapshotRegistry. The runtime supports two types of operations
+ * on state machines: (1) Writes and (2) Reads.
+ *
+ * (1) A write operation, aka a request, can read the full and potentially **uncommitted** state from the state
+ * machine to handle the operation. A write operation typically generates a response and a list of
+ * records. The records are applied to the state machine and persisted to the partition. The response
+ * is parked until the records are committed and is delivered when they are.
+ *
+ * (2) A read operation, aka a request, can only read the committed state from the state machine to handle
+ * the operation. A read operation typically generates a response that is immediately completed.
+ *
+ * The runtime framework exposes an asynchronous, future-based API to the world. All the operations
+ * are executed by a CoordinatorEventProcessor. The processor guarantees that operations for a
+ * single partition or state machine are not processed concurrently.
+ *
+ * @param  The type of the state machine.
+ * @param  The type of the record.
+ */
+public class CoordinatorRuntime, U> {
+
+/**
+ * Builder to create a CoordinatorRuntime.
+ *
+ * @param  The type of the state machine.
+ * @param  The type of the record.
+ */
+public static class Builder, U> {
+private LogContext logContext;
+private CoordinatorEventProcessor eventProcessor;
+private PartitionWriter partitionWriter;
+private CoordinatorLoader loader;
+private CoordinatorBuilderSupplier coordinatorBuilderSupplier;
+
+public Builder withLogContext(LogContext logContext) {
+this.logContext = logContext;
+return this;
+}
+
+public Builder withEventProcessor(CoordinatorEventProcessor 
eventProcessor) {
+this.eventProcessor = eventProcessor;
+return this;
+}
+
+public Builder withPartitionWriter(PartitionWriter 
partitionWriter) {
+this.partitionWriter = partitionWriter;
+return this;
+}
+
+public Builder withLoader(CoordinatorLoader loader) {
+this.loader = loader;
+return this;
+}
+
+public Builder 
withCoordinatorBuilderSupplier(CoordinatorBuilderSupplier 
coordinatorBuilderSupplier) {
+this.coordinatorBuilderSupplier = coordinatorBuilderSupplier;
+return this;
+}
+
+public CoordinatorRuntime build() {
+if (logContext == null)
+logContext = new LogContext();
+if (eventProcessor == null)
+throw new 

[GitHub] [kafka] jolshan commented on a diff in pull request #13795: KAFKA-14462; [17/N] Add CoordinatorRuntime

2023-06-06 Thread via GitHub


jolshan commented on code in PR #13795:
URL: https://github.com/apache/kafka/pull/13795#discussion_r1220554821


##
group-coordinator/src/main/java/org/apache/kafka/coordinator/group/runtime/CoordinatorRuntime.java:
##
@@ -0,0 +1,1009 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ *http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.kafka.coordinator.group.runtime;
+
+import org.apache.kafka.common.KafkaException;
+import org.apache.kafka.common.TopicPartition;
+import org.apache.kafka.common.errors.CoordinatorLoadInProgressException;
+import org.apache.kafka.common.errors.NotCoordinatorException;
+import org.apache.kafka.common.protocol.Errors;
+import org.apache.kafka.common.utils.LogContext;
+import org.apache.kafka.deferred.DeferredEvent;
+import org.apache.kafka.deferred.DeferredEventQueue;
+import org.apache.kafka.timeline.SnapshotRegistry;
+import org.slf4j.Logger;
+
+import java.util.HashSet;
+import java.util.OptionalLong;
+import java.util.Set;
+import java.util.concurrent.CompletableFuture;
+import java.util.concurrent.ConcurrentHashMap;
+
+/**
+ * The CoordinatorRuntime provides a framework to implement coordinators such 
as the group coordinator
+ * or the transaction coordinator.
+ *
+ * The runtime framework maps each underlying partition (e.g. __consumer_offsets) that the broker is a
+ * leader of to a coordinator replicated state machine. A replicated state machine holds the hard and soft
+ * state of all the objects (e.g. groups or offsets) assigned to the partition. The hard state is stored in
+ * timeline data structures backed by a SnapshotRegistry. The runtime supports two types of operations
+ * on state machines: (1) Writes and (2) Reads.
+ *
+ * (1) A write operation, aka a request, can read the full and potentially **uncommitted** state from the state
+ * machine to handle the operation. A write operation typically generates a response and a list of
+ * records. The records are applied to the state machine and persisted to the partition. The response
+ * is parked until the records are committed and is delivered when they are.
+ *
+ * (2) A read operation, aka a request, can only read the committed state from the state machine to handle
+ * the operation. A read operation typically generates a response that is immediately completed.
+ *
+ * The runtime framework exposes an asynchronous, future-based API to the world. All the operations
+ * are executed by a CoordinatorEventProcessor. The processor guarantees that operations for a
+ * single partition or state machine are not processed concurrently.
+ *
+ * @param  The type of the state machine.
+ * @param  The type of the record.
+ */
+public class CoordinatorRuntime, U> {
+
+/**
+ * Builder to create a CoordinatorRuntime.
+ *
+ * @param  The type of the state machine.
+ * @param  The type of the record.
+ */
+public static class Builder, U> {
+private LogContext logContext;
+private CoordinatorEventProcessor eventProcessor;
+private PartitionWriter partitionWriter;
+private CoordinatorLoader loader;
+private CoordinatorBuilderSupplier coordinatorBuilderSupplier;
+
+public Builder withLogContext(LogContext logContext) {
+this.logContext = logContext;
+return this;
+}
+
+public Builder withEventProcessor(CoordinatorEventProcessor 
eventProcessor) {
+this.eventProcessor = eventProcessor;
+return this;
+}
+
+public Builder withPartitionWriter(PartitionWriter 
partitionWriter) {
+this.partitionWriter = partitionWriter;
+return this;
+}
+
+public Builder withLoader(CoordinatorLoader loader) {
+this.loader = loader;
+return this;
+}
+
+public Builder 
withCoordinatorStateMachineSupplier(CoordinatorBuilderSupplier 
coordinatorBuilderSupplier) {
+this.coordinatorBuilderSupplier = coordinatorBuilderSupplier;
+return this;
+}
+
+public CoordinatorRuntime build() {
+if (logContext == null)
+logContext = new LogContext();
+if (eventProcessor == null)
+throw new 

[GitHub] [kafka] jolshan commented on a diff in pull request #13795: KAFKA-14462; [17/N] Add CoordinatorRuntime

2023-06-06 Thread via GitHub


jolshan commented on code in PR #13795:
URL: https://github.com/apache/kafka/pull/13795#discussion_r1220268552


##
group-coordinator/src/main/java/org/apache/kafka/coordinator/group/runtime/CoordinatorRuntime.java:
##
@@ -0,0 +1,1009 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ *http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.kafka.coordinator.group.runtime;
+
+import org.apache.kafka.common.KafkaException;
+import org.apache.kafka.common.TopicPartition;
+import org.apache.kafka.common.errors.CoordinatorLoadInProgressException;
+import org.apache.kafka.common.errors.NotCoordinatorException;
+import org.apache.kafka.common.protocol.Errors;
+import org.apache.kafka.common.utils.LogContext;
+import org.apache.kafka.deferred.DeferredEvent;
+import org.apache.kafka.deferred.DeferredEventQueue;
+import org.apache.kafka.timeline.SnapshotRegistry;
+import org.slf4j.Logger;
+
+import java.util.HashSet;
+import java.util.OptionalLong;
+import java.util.Set;
+import java.util.concurrent.CompletableFuture;
+import java.util.concurrent.ConcurrentHashMap;
+
+/**
+ * The CoordinatorRuntime provides a framework to implement coordinators such 
as the group coordinator
+ * or the transaction coordinator.
+ *
+ * The runtime framework maps each underlying partition (e.g. __consumer_offsets) that the broker is a
+ * leader of to a coordinator replicated state machine. A replicated state machine holds the hard and soft
+ * state of all the objects (e.g. groups or offsets) assigned to the partition. The hard state is stored in
+ * timeline data structures backed by a SnapshotRegistry. The runtime supports two types of operations
+ * on state machines: (1) Writes and (2) Reads.
+ *
+ * (1) A write operation, aka a request, can read the full and potentially **uncommitted** state from the state
+ * machine to handle the operation. A write operation typically generates a response and a list of
+ * records. The records are applied to the state machine and persisted to the partition. The response
+ * is parked until the records are committed and is delivered when they are.
+ *
+ * (2) A read operation, aka a request, can only read the committed state from the state machine to handle
+ * the operation. A read operation typically generates a response that is immediately completed.
+ *
+ * The runtime framework exposes an asynchronous, future-based API to the world. All the operations
+ * are executed by a CoordinatorEventProcessor. The processor guarantees that operations for a
+ * single partition or state machine are not processed concurrently.
+ *
+ * @param  The type of the state machine.
+ * @param  The type of the record.
+ */
+public class CoordinatorRuntime, U> {
+
+/**
+ * Builder to create a CoordinatorRuntime.
+ *
+ * @param  The type of the state machine.
+ * @param  The type of the record.
+ */
+public static class Builder, U> {
+private LogContext logContext;
+private CoordinatorEventProcessor eventProcessor;
+private PartitionWriter partitionWriter;
+private CoordinatorLoader loader;
+private CoordinatorBuilderSupplier coordinatorBuilderSupplier;
+
+public Builder withLogContext(LogContext logContext) {
+this.logContext = logContext;
+return this;
+}
+
+public Builder withEventProcessor(CoordinatorEventProcessor 
eventProcessor) {
+this.eventProcessor = eventProcessor;
+return this;
+}
+
+public Builder withPartitionWriter(PartitionWriter 
partitionWriter) {
+this.partitionWriter = partitionWriter;
+return this;
+}
+
+public Builder withLoader(CoordinatorLoader loader) {
+this.loader = loader;
+return this;
+}
+
+public Builder 
withCoordinatorStateMachineSupplier(CoordinatorBuilderSupplier 
coordinatorBuilderSupplier) {
+this.coordinatorBuilderSupplier = coordinatorBuilderSupplier;
+return this;
+}
+
+public CoordinatorRuntime build() {
+if (logContext == null)
+logContext = new LogContext();
+if (eventProcessor == null)
+throw new 

[GitHub] [kafka] jolshan commented on a diff in pull request #13795: KAFKA-14462; [17/N] Add CoordinatorRuntime

2023-06-06 Thread via GitHub


jolshan commented on code in PR #13795:
URL: https://github.com/apache/kafka/pull/13795#discussion_r1220268287


##
group-coordinator/src/main/java/org/apache/kafka/coordinator/group/runtime/CoordinatorRuntime.java:
##
@@ -0,0 +1,1009 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ *http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.kafka.coordinator.group.runtime;
+
+import org.apache.kafka.common.KafkaException;
+import org.apache.kafka.common.TopicPartition;
+import org.apache.kafka.common.errors.CoordinatorLoadInProgressException;
+import org.apache.kafka.common.errors.NotCoordinatorException;
+import org.apache.kafka.common.protocol.Errors;
+import org.apache.kafka.common.utils.LogContext;
+import org.apache.kafka.deferred.DeferredEvent;
+import org.apache.kafka.deferred.DeferredEventQueue;
+import org.apache.kafka.timeline.SnapshotRegistry;
+import org.slf4j.Logger;
+
+import java.util.HashSet;
+import java.util.OptionalLong;
+import java.util.Set;
+import java.util.concurrent.CompletableFuture;
+import java.util.concurrent.ConcurrentHashMap;
+
+/**
+ * The CoordinatorRuntime provides a framework to implement coordinators such 
as the group coordinator
+ * or the transaction coordinator.
+ *
+ * The runtime framework maps each underlying partition (e.g. __consumer_offsets) that the broker is a
+ * leader of to a coordinator replicated state machine. A replicated state machine holds the hard and soft
+ * state of all the objects (e.g. groups or offsets) assigned to the partition. The hard state is stored in
+ * timeline data structures backed by a SnapshotRegistry. The runtime supports two types of operations
+ * on state machines: (1) Writes and (2) Reads.
+ *
+ * (1) A write operation, aka a request, can read the full and potentially **uncommitted** state from the state
+ * machine to handle the operation. A write operation typically generates a response and a list of
+ * records. The records are applied to the state machine and persisted to the partition. The response
+ * is parked until the records are committed and is delivered when they are.
+ *
+ * (2) A read operation, aka a request, can only read the committed state from the state machine to handle
+ * the operation. A read operation typically generates a response that is immediately completed.
+ *
+ * The runtime framework exposes an asynchronous, future-based API to the world. All the operations
+ * are executed by a CoordinatorEventProcessor. The processor guarantees that operations for a
+ * single partition or state machine are not processed concurrently.
+ *
+ * @param  The type of the state machine.
+ * @param  The type of the record.
+ */
+public class CoordinatorRuntime, U> {
+
+/**
+ * Builder to create a CoordinatorRuntime.
+ *
+ * @param  The type of the state machine.
+ * @param  The type of the record.
+ */
+public static class Builder, U> {
+private LogContext logContext;
+private CoordinatorEventProcessor eventProcessor;
+private PartitionWriter partitionWriter;
+private CoordinatorLoader loader;
+private CoordinatorBuilderSupplier coordinatorBuilderSupplier;
+
+public Builder withLogContext(LogContext logContext) {
+this.logContext = logContext;
+return this;
+}
+
+public Builder withEventProcessor(CoordinatorEventProcessor 
eventProcessor) {
+this.eventProcessor = eventProcessor;
+return this;
+}
+
+public Builder withPartitionWriter(PartitionWriter 
partitionWriter) {
+this.partitionWriter = partitionWriter;
+return this;
+}
+
+public Builder withLoader(CoordinatorLoader loader) {
+this.loader = loader;
+return this;
+}
+
+public Builder 
withCoordinatorStateMachineSupplier(CoordinatorBuilderSupplier 
coordinatorBuilderSupplier) {
+this.coordinatorBuilderSupplier = coordinatorBuilderSupplier;
+return this;
+}
+
+public CoordinatorRuntime build() {
+if (logContext == null)
+logContext = new LogContext();
+if (eventProcessor == null)
+throw new 

[GitHub] [kafka] jolshan commented on a diff in pull request #13795: KAFKA-14462; [17/N] Add CoordinatorRuntime

2023-06-06 Thread via GitHub


jolshan commented on code in PR #13795:
URL: https://github.com/apache/kafka/pull/13795#discussion_r1220267344


##
group-coordinator/src/main/java/org/apache/kafka/coordinator/group/runtime/CoordinatorRuntime.java:
##
@@ -0,0 +1,1009 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ *http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.kafka.coordinator.group.runtime;
+
+import org.apache.kafka.common.KafkaException;
+import org.apache.kafka.common.TopicPartition;
+import org.apache.kafka.common.errors.CoordinatorLoadInProgressException;
+import org.apache.kafka.common.errors.NotCoordinatorException;
+import org.apache.kafka.common.protocol.Errors;
+import org.apache.kafka.common.utils.LogContext;
+import org.apache.kafka.deferred.DeferredEvent;
+import org.apache.kafka.deferred.DeferredEventQueue;
+import org.apache.kafka.timeline.SnapshotRegistry;
+import org.slf4j.Logger;
+
+import java.util.HashSet;
+import java.util.OptionalLong;
+import java.util.Set;
+import java.util.concurrent.CompletableFuture;
+import java.util.concurrent.ConcurrentHashMap;
+
+/**
+ * The CoordinatorRuntime provides a framework to implement coordinators such 
as the group coordinator
+ * or the transaction coordinator.
+ *
+ * The runtime framework maps each underlying partition (e.g. __consumer_offsets) that the broker is a
+ * leader of to a coordinator replicated state machine. A replicated state machine holds the hard and soft
+ * state of all the objects (e.g. groups or offsets) assigned to the partition. The hard state is stored in
+ * timeline data structures backed by a SnapshotRegistry. The runtime supports two types of operations
+ * on state machines: (1) Writes and (2) Reads.
+ *
+ * (1) A write operation, aka a request, can read the full and potentially **uncommitted** state from the state
+ * machine to handle the operation. A write operation typically generates a response and a list of
+ * records. The records are applied to the state machine and persisted to the partition. The response
+ * is parked until the records are committed and is delivered when they are.
+ *
+ * (2) A read operation, aka a request, can only read the committed state from the state machine to handle
+ * the operation. A read operation typically generates a response that is immediately completed.
+ *
+ * The runtime framework exposes an asynchronous, future-based API to the world. All the operations
+ * are executed by a CoordinatorEventProcessor. The processor guarantees that operations for a
+ * single partition or state machine are not processed concurrently.
+ *
+ * @param  The type of the state machine.
+ * @param  The type of the record.
+ */
+public class CoordinatorRuntime, U> {
+
+/**
+ * Builder to create a CoordinatorRuntime.
+ *
+ * @param  The type of the state machine.
+ * @param  The type of the record.
+ */
+public static class Builder, U> {
+private LogContext logContext;
+private CoordinatorEventProcessor eventProcessor;
+private PartitionWriter partitionWriter;
+private CoordinatorLoader loader;
+private CoordinatorBuilderSupplier coordinatorBuilderSupplier;
+
+public Builder withLogContext(LogContext logContext) {
+this.logContext = logContext;
+return this;
+}
+
+public Builder withEventProcessor(CoordinatorEventProcessor 
eventProcessor) {
+this.eventProcessor = eventProcessor;
+return this;
+}
+
+public Builder withPartitionWriter(PartitionWriter 
partitionWriter) {
+this.partitionWriter = partitionWriter;
+return this;
+}
+
+public Builder withLoader(CoordinatorLoader loader) {
+this.loader = loader;
+return this;
+}
+
+public Builder 
withCoordinatorStateMachineSupplier(CoordinatorBuilderSupplier 
coordinatorBuilderSupplier) {
+this.coordinatorBuilderSupplier = coordinatorBuilderSupplier;
+return this;
+}
+
+public CoordinatorRuntime build() {
+if (logContext == null)
+logContext = new LogContext();
+if (eventProcessor == null)
+throw new 

[GitHub] [kafka] jolshan commented on a diff in pull request #13795: KAFKA-14462; [17/N] Add CoordinatorRuntime

2023-06-05 Thread via GitHub


jolshan commented on code in PR #13795:
URL: https://github.com/apache/kafka/pull/13795#discussion_r1218619228


##
group-coordinator/src/main/java/org/apache/kafka/coordinator/group/runtime/CoordinatorRuntime.java:
##
@@ -0,0 +1,1009 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ *http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.kafka.coordinator.group.runtime;
+
+import org.apache.kafka.common.KafkaException;
+import org.apache.kafka.common.TopicPartition;
+import org.apache.kafka.common.errors.CoordinatorLoadInProgressException;
+import org.apache.kafka.common.errors.NotCoordinatorException;
+import org.apache.kafka.common.protocol.Errors;
+import org.apache.kafka.common.utils.LogContext;
+import org.apache.kafka.deferred.DeferredEvent;
+import org.apache.kafka.deferred.DeferredEventQueue;
+import org.apache.kafka.timeline.SnapshotRegistry;
+import org.slf4j.Logger;
+
+import java.util.HashSet;
+import java.util.OptionalLong;
+import java.util.Set;
+import java.util.concurrent.CompletableFuture;
+import java.util.concurrent.ConcurrentHashMap;
+
+/**
+ * The CoordinatorRuntime provides a framework to implement coordinators such 
as the group coordinator
+ * or the transaction coordinator.
+ *
+ * The runtime framework maps each underlying partition (e.g. __consumer_offsets) that the broker is a
+ * leader of to a coordinator replicated state machine. A replicated state machine holds the hard and soft
+ * state of all the objects (e.g. groups or offsets) assigned to the partition. The hard state is stored in
+ * timeline data structures backed by a SnapshotRegistry. The runtime supports two types of operations
+ * on state machines: (1) Writes and (2) Reads.
+ *
+ * (1) A write operation, aka a request, can read the full and potentially **uncommitted** state from the state
+ * machine to handle the operation. A write operation typically generates a response and a list of
+ * records. The records are applied to the state machine and persisted to the partition. The response
+ * is parked until the records are committed and is delivered when they are.
+ *
+ * (2) A read operation, aka a request, can only read the committed state from the state machine to handle
+ * the operation. A read operation typically generates a response that is immediately completed.
+ *
+ * The runtime framework exposes an asynchronous, future-based API to the world. All the operations
+ * are executed by a CoordinatorEventProcessor. The processor guarantees that operations for a
+ * single partition or state machine are not processed concurrently.
+ *
+ * @param  The type of the state machine.
+ * @param  The type of the record.
+ */
+public class CoordinatorRuntime, U> {
+
+/**
+ * Builder to create a CoordinatorRuntime.
+ *
+ * @param  The type of the state machine.
+ * @param  The type of the record.
+ */
+public static class Builder, U> {
+private LogContext logContext;
+private CoordinatorEventProcessor eventProcessor;
+private PartitionWriter partitionWriter;
+private CoordinatorLoader loader;
+private CoordinatorBuilderSupplier coordinatorBuilderSupplier;
+
+public Builder withLogContext(LogContext logContext) {
+this.logContext = logContext;
+return this;
+}
+
+public Builder withEventProcessor(CoordinatorEventProcessor 
eventProcessor) {
+this.eventProcessor = eventProcessor;
+return this;
+}
+
+public Builder withPartitionWriter(PartitionWriter 
partitionWriter) {
+this.partitionWriter = partitionWriter;
+return this;
+}
+
+public Builder withLoader(CoordinatorLoader loader) {
+this.loader = loader;
+return this;
+}
+
+public Builder 
withCoordinatorStateMachineSupplier(CoordinatorBuilderSupplier 
coordinatorBuilderSupplier) {
+this.coordinatorBuilderSupplier = coordinatorBuilderSupplier;
+return this;
+}
+
+public CoordinatorRuntime build() {
+if (logContext == null)
+logContext = new LogContext();
+if (eventProcessor == null)
+throw new 

[GitHub] [kafka] jolshan commented on a diff in pull request #13795: KAFKA-14462; [17/N] Add CoordinatorRuntime

2023-06-05 Thread via GitHub


jolshan commented on code in PR #13795:
URL: https://github.com/apache/kafka/pull/13795#discussion_r1218561662


##
group-coordinator/src/main/java/org/apache/kafka/coordinator/group/runtime/CoordinatorRuntime.java:
##
@@ -0,0 +1,1009 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ *http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.kafka.coordinator.group.runtime;
+
+import org.apache.kafka.common.KafkaException;
+import org.apache.kafka.common.TopicPartition;
+import org.apache.kafka.common.errors.CoordinatorLoadInProgressException;
+import org.apache.kafka.common.errors.NotCoordinatorException;
+import org.apache.kafka.common.protocol.Errors;
+import org.apache.kafka.common.utils.LogContext;
+import org.apache.kafka.deferred.DeferredEvent;
+import org.apache.kafka.deferred.DeferredEventQueue;
+import org.apache.kafka.timeline.SnapshotRegistry;
+import org.slf4j.Logger;
+
+import java.util.HashSet;
+import java.util.OptionalLong;
+import java.util.Set;
+import java.util.concurrent.CompletableFuture;
+import java.util.concurrent.ConcurrentHashMap;
+
+/**
+ * The CoordinatorRuntime provides a framework to implement coordinators such 
as the group coordinator
+ * or the transaction coordinator.
+ *
+ * The runtime framework maps each underlying partition (e.g. __consumer_offsets) that the broker is a
+ * leader of to a coordinator replicated state machine. A replicated state machine holds the hard and soft
+ * state of all the objects (e.g. groups or offsets) assigned to the partition. The hard state is stored in
+ * timeline data structures backed by a SnapshotRegistry. The runtime supports two types of operations
+ * on state machines: (1) Writes and (2) Reads.
+ *
+ * (1) A write operation, aka a request, can read the full and potentially **uncommitted** state from the state
+ * machine to handle the operation. A write operation typically generates a response and a list of
+ * records. The records are applied to the state machine and persisted to the partition. The response
+ * is parked until the records are committed and is delivered when they are.
+ *
+ * (2) A read operation, aka a request, can only read the committed state from the state machine to handle
+ * the operation. A read operation typically generates a response that is immediately completed.
+ *
+ * The runtime framework exposes an asynchronous, future-based API to the world. All the operations
+ * are executed by a CoordinatorEventProcessor. The processor guarantees that operations for a
+ * single partition or state machine are not processed concurrently.
+ *
+ * @param  The type of the state machine.
+ * @param  The type of the record.
+ */
+public class CoordinatorRuntime, U> {
+
+/**
+ * Builder to create a CoordinatorRuntime.
+ *
+ * @param  The type of the state machine.
+ * @param  The type of the record.
+ */
+public static class Builder, U> {
+private LogContext logContext;
+private CoordinatorEventProcessor eventProcessor;
+private PartitionWriter partitionWriter;
+private CoordinatorLoader loader;
+private CoordinatorBuilderSupplier coordinatorBuilderSupplier;
+
+public Builder withLogContext(LogContext logContext) {
+this.logContext = logContext;
+return this;
+}
+
+public Builder withEventProcessor(CoordinatorEventProcessor 
eventProcessor) {
+this.eventProcessor = eventProcessor;
+return this;
+}
+
+public Builder withPartitionWriter(PartitionWriter 
partitionWriter) {
+this.partitionWriter = partitionWriter;
+return this;
+}
+
+public Builder withLoader(CoordinatorLoader loader) {
+this.loader = loader;
+return this;
+}
+
+public Builder 
withCoordinatorStateMachineSupplier(CoordinatorBuilderSupplier 
coordinatorBuilderSupplier) {
+this.coordinatorBuilderSupplier = coordinatorBuilderSupplier;
+return this;
+}
+
+public CoordinatorRuntime build() {
+if (logContext == null)
+logContext = new LogContext();
+if (eventProcessor == null)
+throw new 

[GitHub] [kafka] jolshan commented on a diff in pull request #13795: KAFKA-14462; [17/N] Add CoordinatorRuntime

2023-06-05 Thread via GitHub


jolshan commented on code in PR #13795:
URL: https://github.com/apache/kafka/pull/13795#discussion_r1218601535


##
group-coordinator/src/main/java/org/apache/kafka/coordinator/group/runtime/CoordinatorRuntime.java:
##
@@ -0,0 +1,1009 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ *http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.kafka.coordinator.group.runtime;
+
+import org.apache.kafka.common.KafkaException;
+import org.apache.kafka.common.TopicPartition;
+import org.apache.kafka.common.errors.CoordinatorLoadInProgressException;
+import org.apache.kafka.common.errors.NotCoordinatorException;
+import org.apache.kafka.common.protocol.Errors;
+import org.apache.kafka.common.utils.LogContext;
+import org.apache.kafka.deferred.DeferredEvent;
+import org.apache.kafka.deferred.DeferredEventQueue;
+import org.apache.kafka.timeline.SnapshotRegistry;
+import org.slf4j.Logger;
+
+import java.util.HashSet;
+import java.util.OptionalLong;
+import java.util.Set;
+import java.util.concurrent.CompletableFuture;
+import java.util.concurrent.ConcurrentHashMap;
+
+/**
+ * The CoordinatorRuntime provides a framework to implement coordinators such 
as the group coordinator
+ * or the transaction coordinator.
+ *
+ * The runtime framework maps each underlying partition (e.g. __consumer_offsets) that the broker is a
+ * leader of to a coordinator replicated state machine. A replicated state machine holds the hard and soft
+ * state of all the objects (e.g. groups or offsets) assigned to the partition. The hard state is stored in
+ * timeline data structures backed by a SnapshotRegistry. The runtime supports two types of operations
+ * on state machines: (1) Writes and (2) Reads.
+ *
+ * (1) A write operation, aka a request, can read the full and potentially **uncommitted** state from the state
+ * machine to handle the operation. A write operation typically generates a response and a list of
+ * records. The records are applied to the state machine and persisted to the partition. The response
+ * is parked until the records are committed and is delivered when they are.
+ *
+ * (2) A read operation, aka a request, can only read the committed state from the state machine to handle
+ * the operation. A read operation typically generates a response that is immediately completed.
+ *
+ * The runtime framework exposes an asynchronous, future-based API to the world. All the operations
+ * are executed by a CoordinatorEventProcessor. The processor guarantees that operations for a
+ * single partition or state machine are not processed concurrently.
+ *
+ * @param  The type of the state machine.
+ * @param  The type of the record.
+ */
+public class CoordinatorRuntime, U> {
+
+/**
+ * Builder to create a CoordinatorRuntime.
+ *
+ * @param  The type of the state machine.
+ * @param  The type of the record.
+ */
+public static class Builder, U> {
+private LogContext logContext;
+private CoordinatorEventProcessor eventProcessor;
+private PartitionWriter partitionWriter;
+private CoordinatorLoader loader;
+private CoordinatorBuilderSupplier coordinatorBuilderSupplier;
+
+public Builder withLogContext(LogContext logContext) {
+this.logContext = logContext;
+return this;
+}
+
+public Builder withEventProcessor(CoordinatorEventProcessor 
eventProcessor) {
+this.eventProcessor = eventProcessor;
+return this;
+}
+
+public Builder withPartitionWriter(PartitionWriter 
partitionWriter) {
+this.partitionWriter = partitionWriter;
+return this;
+}
+
+public Builder withLoader(CoordinatorLoader loader) {
+this.loader = loader;
+return this;
+}
+
+public Builder 
withCoordinatorStateMachineSupplier(CoordinatorBuilderSupplier 
coordinatorBuilderSupplier) {
+this.coordinatorBuilderSupplier = coordinatorBuilderSupplier;
+return this;
+}
+
+public CoordinatorRuntime build() {
+if (logContext == null)
+logContext = new LogContext();
+if (eventProcessor == null)
+throw new 

[GitHub] [kafka] jolshan commented on a diff in pull request #13795: KAFKA-14462; [17/N] Add CoordinatorRuntime

2023-06-05 Thread via GitHub


jolshan commented on code in PR #13795:
URL: https://github.com/apache/kafka/pull/13795#discussion_r1218593915


##
group-coordinator/src/main/java/org/apache/kafka/coordinator/group/runtime/CoordinatorRuntime.java:
##
@@ -0,0 +1,1009 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ *http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.kafka.coordinator.group.runtime;
+
+import org.apache.kafka.common.KafkaException;
+import org.apache.kafka.common.TopicPartition;
+import org.apache.kafka.common.errors.CoordinatorLoadInProgressException;
+import org.apache.kafka.common.errors.NotCoordinatorException;
+import org.apache.kafka.common.protocol.Errors;
+import org.apache.kafka.common.utils.LogContext;
+import org.apache.kafka.deferred.DeferredEvent;
+import org.apache.kafka.deferred.DeferredEventQueue;
+import org.apache.kafka.timeline.SnapshotRegistry;
+import org.slf4j.Logger;
+
+import java.util.HashSet;
+import java.util.OptionalLong;
+import java.util.Set;
+import java.util.concurrent.CompletableFuture;
+import java.util.concurrent.ConcurrentHashMap;
+
+/**
+ * The CoordinatorRuntime provides a framework to implement coordinators such 
as the group coordinator
+ * or the transaction coordinator.
+ *
+ * The runtime framework maps each underlying partition (e.g. __consumer_offsets) that the broker is a
+ * leader of to a coordinator replicated state machine. A replicated state machine holds the hard and soft
+ * state of all the objects (e.g. groups or offsets) assigned to the partition. The hard state is stored in
+ * timeline data structures backed by a SnapshotRegistry. The runtime supports two types of operations
+ * on state machines: (1) Writes and (2) Reads.
+ *
+ * (1) A write operation, aka a request, can read the full and potentially **uncommitted** state from the state
+ * machine to handle the operation. A write operation typically generates a response and a list of
+ * records. The records are applied to the state machine and persisted to the partition. The response
+ * is parked until the records are committed and is delivered when they are.
+ *
+ * (2) A read operation, aka a request, can only read the committed state from the state machine to handle
+ * the operation. A read operation typically generates a response that is immediately completed.
+ *
+ * The runtime framework exposes an asynchronous, future-based API to the world. All the operations
+ * are executed by a CoordinatorEventProcessor. The processor guarantees that operations for a
+ * single partition or state machine are not processed concurrently.
+ *
+ * @param  The type of the state machine.
+ * @param  The type of the record.
+ */
+public class CoordinatorRuntime<S extends Coordinator<U>, U> {
+
+    /**
+     * Builder to create a CoordinatorRuntime.
+     *
+     * @param <S> The type of the state machine.
+     * @param <U> The type of the record.
+     */
+    public static class Builder<S extends Coordinator<U>, U> {
+        private LogContext logContext;
+        private CoordinatorEventProcessor eventProcessor;
+        private PartitionWriter<U> partitionWriter;
+        private CoordinatorLoader<U> loader;
+        private CoordinatorBuilderSupplier<S, U> coordinatorBuilderSupplier;
+
+        public Builder<S, U> withLogContext(LogContext logContext) {
+            this.logContext = logContext;
+            return this;
+        }
+
+        public Builder<S, U> withEventProcessor(CoordinatorEventProcessor eventProcessor) {
+            this.eventProcessor = eventProcessor;
+            return this;
+        }
+
+        public Builder<S, U> withPartitionWriter(PartitionWriter<U> partitionWriter) {
+            this.partitionWriter = partitionWriter;
+            return this;
+        }
+
+        public Builder<S, U> withLoader(CoordinatorLoader<U> loader) {
+            this.loader = loader;
+            return this;
+        }
+
+        public Builder<S, U> withCoordinatorStateMachineSupplier(CoordinatorBuilderSupplier<S, U> coordinatorBuilderSupplier) {
+            this.coordinatorBuilderSupplier = coordinatorBuilderSupplier;
+            return this;
+        }
+
+        public CoordinatorRuntime<S, U> build() {
+            if (logContext == null)
+                logContext = new LogContext();
+            if (eventProcessor == null)
+                throw new 
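
For context on the write path described in the quoted javadoc, the following is a minimal, self-contained sketch of the "records are persisted, response is parked until commit" idea. It is not the PR's implementation: the class and method names (DeferredWriteSketch, write, commitUpTo) are invented for illustration, and the real runtime presumably tracks parked responses through the DeferredEventQueue imported above rather than a bare TreeMap.

import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.TreeMap;
import java.util.concurrent.CompletableFuture;

/**
 * Toy model (invented names, not the PR's classes) of the deferred write
 * completion described above: records are appended to an in-memory
 * "partition", and the response future only completes once the commit
 * offset reaches the last appended record.
 */
public class DeferredWriteSketch {
    private final List<String> partition = new ArrayList<>();
    // Parked completions keyed by the offset they are waiting on.
    private final TreeMap<Long, Runnable> parked = new TreeMap<>();

    /** Appends records and returns a future that completes only once they commit. */
    public synchronized CompletableFuture<String> write(List<String> records, String response) {
        CompletableFuture<String> future = new CompletableFuture<>();
        if (records.isEmpty()) {
            future.complete(response);             // nothing to replicate, complete right away
            return future;
        }
        partition.addAll(records);                 // "persisted to the partition"
        long lastOffset = partition.size() - 1;    // offset of the last appended record
        parked.put(lastOffset, () -> future.complete(response));
        return future;
    }

    /** Simulates the replication layer advancing the committed offset. */
    public synchronized void commitUpTo(long committedOffset) {
        // Deliver every parked response whose records are now committed.
        Map<Long, Runnable> ready = new TreeMap<>(parked.headMap(committedOffset, true));
        ready.forEach((offset, completion) -> {
            completion.run();
            parked.remove(offset);
        });
    }

    public static void main(String[] args) {
        DeferredWriteSketch sketch = new DeferredWriteSketch();
        CompletableFuture<String> response = sketch.write(List.of("record-0", "record-1"), "ok");
        System.out.println(response.isDone());     // false: records appended but not yet committed
        sketch.commitUpTo(1);
        System.out.println(response.join());       // "ok": delivered once the records are committed
    }
}

The point of the design is that the caller gets its CompletableFuture back immediately, but the response only becomes visible once the generated records are durable and committed on the partition.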

[GitHub] [kafka] jolshan commented on a diff in pull request #13795: KAFKA-14462; [17/N] Add CoordinatorRuntime

2023-06-05 Thread via GitHub


jolshan commented on code in PR #13795:
URL: https://github.com/apache/kafka/pull/13795#discussion_r1218561662


##
group-coordinator/src/main/java/org/apache/kafka/coordinator/group/runtime/CoordinatorRuntime.java:
##

[GitHub] [kafka] jolshan commented on a diff in pull request #13795: KAFKA-14462; [17/N] Add CoordinatorRuntime

2023-06-05 Thread via GitHub


jolshan commented on code in PR #13795:
URL: https://github.com/apache/kafka/pull/13795#discussion_r1218547896


##
group-coordinator/src/main/java/org/apache/kafka/coordinator/group/runtime/CoordinatorRuntime.java:
##

[GitHub] [kafka] jolshan commented on a diff in pull request #13795: KAFKA-14462; [17/N] Add CoordinatorRuntime

2023-06-05 Thread via GitHub


jolshan commented on code in PR #13795:
URL: https://github.com/apache/kafka/pull/13795#discussion_r1218537640


##
group-coordinator/src/main/java/org/apache/kafka/coordinator/group/runtime/CoordinatorRuntime.java:
##

[GitHub] [kafka] jolshan commented on a diff in pull request #13795: KAFKA-14462; [17/N] Add CoordinatorRuntime

2023-06-05 Thread via GitHub


jolshan commented on code in PR #13795:
URL: https://github.com/apache/kafka/pull/13795#discussion_r1218533754


##
group-coordinator/src/main/java/org/apache/kafka/coordinator/group/runtime/CoordinatorRuntime.java:
##

[GitHub] [kafka] jolshan commented on a diff in pull request #13795: KAFKA-14462; [17/N] Add CoordinatorRuntime

2023-06-02 Thread via GitHub


jolshan commented on code in PR #13795:
URL: https://github.com/apache/kafka/pull/13795#discussion_r1214957871


##
group-coordinator/src/main/java/org/apache/kafka/coordinator/group/runtime/CoordinatorRuntime.java:
##

[GitHub] [kafka] jolshan commented on a diff in pull request #13795: KAFKA-14462; [17/N] Add CoordinatorRuntime

2023-06-02 Thread via GitHub


jolshan commented on code in PR #13795:
URL: https://github.com/apache/kafka/pull/13795#discussion_r1214930728


##
group-coordinator/src/main/java/org/apache/kafka/coordinator/group/runtime/CoordinatorRuntime.java:
##

[GitHub] [kafka] jolshan commented on a diff in pull request #13795: KAFKA-14462; [17/N] Add CoordinatorRuntime

2023-06-02 Thread via GitHub


jolshan commented on code in PR #13795:
URL: https://github.com/apache/kafka/pull/13795#discussion_r1214835504


##
group-coordinator/src/main/java/org/apache/kafka/coordinator/group/runtime/CoordinatorRuntime.java:
##
@@ -0,0 +1,959 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ *http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.kafka.coordinator.group.runtime;
+
+import org.apache.kafka.common.KafkaException;
+import org.apache.kafka.common.TopicPartition;
+import org.apache.kafka.common.errors.CoordinatorLoadInProgressException;
+import org.apache.kafka.common.errors.NotCoordinatorException;
+import org.apache.kafka.common.protocol.Errors;
+import org.apache.kafka.common.utils.LogContext;
+import org.apache.kafka.deferred.DeferredEvent;
+import org.apache.kafka.deferred.DeferredEventQueue;
+import org.apache.kafka.timeline.SnapshotRegistry;
+import org.slf4j.Logger;
+
+import java.util.HashSet;
+import java.util.OptionalLong;
+import java.util.Set;
+import java.util.concurrent.CompletableFuture;
+import java.util.concurrent.ConcurrentHashMap;
+
+/**
+ * The CoordinatorRuntime provides a framework to implement coordinators such as the group coordinator
+ * or the transaction coordinator.
+ *
+ * The runtime framework maps each underlying partition (e.g. __consumer_offsets) that the broker is a
+ * leader of to a coordinator replicated state machine. A replicated state machine holds the hard and soft
+ * state of all the objects (e.g. groups or offsets) assigned to the partition. The hard state is stored in
+ * timeline data structures backed by a SnapshotRegistry. The runtime supports two types of operations
+ * on state machines: (1) Writes and (2) Reads.
+ *
+ * (1) A write operation, aka a request, can read the full and potentially **uncommitted** state from the
+ * state machine to handle the operation. A write operation typically generates a response and a list of
+ * records. The records are applied to the state machine and persisted to the partition. The response
+ * is parked until the records are committed and delivered when they are.
+ *
+ * (2) A read operation, aka a request, can only read the committed state from the state machine to handle
+ * the operation. A read operation typically generates a response that is immediately completed.
+ *
+ * The runtime framework expose an asynchronous, future based, API to the world. All the operations

Review Comment:
   nit: exposes
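
As an aside on the sentence flagged above: the full javadoc quoted earlier in the thread also states that the CoordinatorEventProcessor guarantees that operations for a single partition or state machine are not processed concurrently. A minimal, self-contained way to picture that property, using only standard executors and invented names (this is not the PR's processor), is sketched below.

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

/**
 * Toy illustration (invented names, not the PR's CoordinatorEventProcessor):
 * events for the same partition run strictly one after another because they
 * share a single-threaded lane, while events for different partitions may run
 * in parallel on their own lanes.
 */
public class PerPartitionSerialSketch implements AutoCloseable {
    private final ConcurrentHashMap<Integer, ExecutorService> lanes = new ConcurrentHashMap<>();

    /** Enqueues an event for a partition; ordering is preserved per partition. */
    public CompletableFuture<Void> enqueue(int partition, Runnable event) {
        ExecutorService lane = lanes.computeIfAbsent(
            partition, p -> Executors.newSingleThreadExecutor());
        return CompletableFuture.runAsync(event, lane);
    }

    @Override
    public void close() {
        lanes.values().forEach(ExecutorService::shutdown);
    }

    public static void main(String[] args) {
        try (PerPartitionSerialSketch processor = new PerPartitionSerialSketch()) {
            CompletableFuture<Void> first = processor.enqueue(0, () -> System.out.println("p0 event 1"));
            CompletableFuture<Void> second = processor.enqueue(0, () -> System.out.println("p0 event 2"));
            CompletableFuture<Void> other = processor.enqueue(1, () -> System.out.println("p1 event 1"));
            CompletableFuture.allOf(first, second, other).join(); // drain the lanes before shutting down
        }
    }
}

An actual implementation would likely share a bounded pool of threads across partitions rather than dedicating one thread per partition, but the per-partition ordering guarantee is the point being illustrated.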



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: jira-unsubscr...@kafka.apache.org

For queries about this service, please contact Infrastructure at:
us...@infra.apache.org