github-advanced-security[bot] commented on code in PR #18466:
URL: https://github.com/apache/druid/pull/18466#discussion_r2360420331


##########
embedded-tests/src/test/java/org/apache/druid/testing/embedded/indexing/KafkaClusterMetricsTest.java:
##########
@@ -292,6 +366,74 @@
     cluster.callApi().postSupervisor(kafkaSupervisorSpec.createSuspendedSpec());
   }
 
+  @Test
+  @Timeout(60)
+  public void test_ingest50kRows_ofSelfClusterMetricsWithScaleIns_andVerifyValues()
+  {
+    final int maxRowsPerSegment = 1000;
+    final int expectedSegmentsHandedOff = 50;
+
+    final int initialTaskCount = 3;
+
+    // Submit and start a supervisor with scale-in configuration
+    final String supervisorId = dataSource + "_supe";
+    AutoScalerConfig autoScalerConfig = new LagBasedAutoScalerConfigBuilder()
+        .withLagCollectionIntervalMillis(500)
+        .withLagCollectionRangeMillis(1000)
+        .withEnableTaskAutoScaler(true)
+        .withScaleActionPeriodMillis(10000)
+        .withScaleActionStartDelayMillis(5000)
+        .withScaleOutThreshold(10000)
+        .withScaleInThreshold(1)
+        .withTriggerScaleOutFractionThreshold(0.9)
+        .withTriggerScaleInFractionThreshold(0.001)
+        .withTaskCountMax(initialTaskCount)
+        .withTaskCountStart(initialTaskCount)
+        .withScaleOutStep(0)
+        .withScaleInStep(1)
+        .withMinTriggerScaleActionFrequencyMillis(10000)
+        .withStopTaskCountRatio(1.0)
+        .build();
+
+    final KafkaSupervisorSpec kafkaSupervisorSpec = createKafkaSupervisor(
+        supervisorId,
+        initialTaskCount,
+        maxRowsPerSegment,
+        autoScalerConfig,
+        true
+    );
+
+    Assertions.assertEquals(
+        supervisorId,
+        cluster.callApi().postSupervisor(kafkaSupervisorSpec)
+    );
+
+    overlord.latchableEmitter().waitForEventAggregate(
+        event -> event.hasMetricName("task/autoScaler/scaleActionTime"),
+        agg -> agg.hasSumAtLeast(2)
+    );
+
+    indexer.latchableEmitter().waitForEventAggregate(
+        event -> event.hasMetricName("ingest/handoff/count"),
+        agg -> agg.hasSumAtLeast(expectedSegmentsHandedOff)
+    );
+
+    final int numSegments = Integer.parseInt(
+        cluster.runSql("SELECT COUNT(*) FROM sys.segments WHERE datasource = '%s'", dataSource)
+    );

Review Comment:
   ## Missing catch of NumberFormatException
   
   Potential uncaught 'java.lang.NumberFormatException'.
   
   [Show more details](https://github.com/apache/druid/security/code-scanning/10340)
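   
   One way to address this in the test (a minimal sketch, assuming `cluster.runSql` returns the single `COUNT(*)` value as a string; `parseCount` is a hypothetical helper, not part of the quoted diff):
   
   ```java
   // Hypothetical helper: converts a SQL COUNT(*) result string to an int,
   // turning a malformed result into a descriptive test failure instead of
   // an uncaught NumberFormatException.
   private int parseCount(String sqlResult)
   {
     try {
       return Integer.parseInt(sqlResult.trim());
     }
     catch (NumberFormatException e) {
       return Assertions.fail("Expected a numeric query result but got: " + sqlResult, e);
     }
   }
   ```
   
   The call sites would then read, e.g., `final int numSegments = parseCount(cluster.runSql("SELECT COUNT(*) FROM sys.segments WHERE datasource = '%s'", dataSource));`.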



##########
embedded-tests/src/test/java/org/apache/druid/testing/embedded/indexing/KafkaClusterMetricsTest.java:
##########
@@ -292,6 +366,74 @@
     cluster.callApi().postSupervisor(kafkaSupervisorSpec.createSuspendedSpec());
   }
 
+  @Test
+  @Timeout(60)
+  public void test_ingest50kRows_ofSelfClusterMetricsWithScaleIns_andVerifyValues()
+  {
+    final int maxRowsPerSegment = 1000;
+    final int expectedSegmentsHandedOff = 50;
+
+    final int initialTaskCount = 3;
+
+    // Submit and start a supervisor with scale-in configuration
+    final String supervisorId = dataSource + "_supe";
+    AutoScalerConfig autoScalerConfig = new LagBasedAutoScalerConfigBuilder()
+        .withLagCollectionIntervalMillis(500)
+        .withLagCollectionRangeMillis(1000)
+        .withEnableTaskAutoScaler(true)
+        .withScaleActionPeriodMillis(10000)
+        .withScaleActionStartDelayMillis(5000)
+        .withScaleOutThreshold(10000)
+        .withScaleInThreshold(1)
+        .withTriggerScaleOutFractionThreshold(0.9)
+        .withTriggerScaleInFractionThreshold(0.001)
+        .withTaskCountMax(initialTaskCount)
+        .withTaskCountStart(initialTaskCount)
+        .withScaleOutStep(0)
+        .withScaleInStep(1)
+        .withMinTriggerScaleActionFrequencyMillis(10000)
+        .withStopTaskCountRatio(1.0)
+        .build();
+
+    final KafkaSupervisorSpec kafkaSupervisorSpec = createKafkaSupervisor(
+        supervisorId,
+        initialTaskCount,
+        maxRowsPerSegment,
+        autoScalerConfig,
+        true
+    );
+
+    Assertions.assertEquals(
+        supervisorId,
+        cluster.callApi().postSupervisor(kafkaSupervisorSpec)
+    );
+
+    overlord.latchableEmitter().waitForEventAggregate(
+        event -> event.hasMetricName("task/autoScaler/scaleActionTime"),
+        agg -> agg.hasSumAtLeast(2)
+    );
+
+    indexer.latchableEmitter().waitForEventAggregate(
+        event -> event.hasMetricName("ingest/handoff/count"),
+        agg -> agg.hasSumAtLeast(expectedSegmentsHandedOff)
+    );
+
+    final int numSegments = Integer.parseInt(
+        cluster.runSql("SELECT COUNT(*) FROM sys.segments WHERE datasource = '%s'", dataSource)
+    );
+    Assertions.assertTrue(numSegments >= expectedSegmentsHandedOff);
+
+    final int numRows = Integer.parseInt(
+        cluster.runSql("SELECT COUNT(*) FROM %s", dataSource)
+    );

Review Comment:
   ## Missing catch of NumberFormatException
   
   Potential uncaught 'java.lang.NumberFormatException'.
   
   [Show more details](https://github.com/apache/druid/security/code-scanning/10341)



##########
embedded-tests/src/test/java/org/apache/druid/testing/embedded/indexing/KafkaClusterMetricsTest.java:
##########
@@ -292,6 +366,74 @@
     cluster.callApi().postSupervisor(kafkaSupervisorSpec.createSuspendedSpec());
   }
 
+  @Test
+  @Timeout(60)
+  public void test_ingest50kRows_ofSelfClusterMetricsWithScaleIns_andVerifyValues()
+  {
+    final int maxRowsPerSegment = 1000;
+    final int expectedSegmentsHandedOff = 50;
+
+    final int initialTaskCount = 3;
+
+    // Submit and start a supervisor with scale-in configuration
+    final String supervisorId = dataSource + "_supe";
+    AutoScalerConfig autoScalerConfig = new LagBasedAutoScalerConfigBuilder()
+        .withLagCollectionIntervalMillis(500)
+        .withLagCollectionRangeMillis(1000)
+        .withEnableTaskAutoScaler(true)
+        .withScaleActionPeriodMillis(10000)
+        .withScaleActionStartDelayMillis(5000)
+        .withScaleOutThreshold(10000)
+        .withScaleInThreshold(1)
+        .withTriggerScaleOutFractionThreshold(0.9)
+        .withTriggerScaleInFractionThreshold(0.001)
+        .withTaskCountMax(initialTaskCount)
+        .withTaskCountStart(initialTaskCount)
+        .withScaleOutStep(0)
+        .withScaleInStep(1)
+        .withMinTriggerScaleActionFrequencyMillis(10000)
+        .withStopTaskCountRatio(1.0)
+        .build();
+
+    final KafkaSupervisorSpec kafkaSupervisorSpec = createKafkaSupervisor(
+        supervisorId,
+        initialTaskCount,
+        maxRowsPerSegment,
+        autoScalerConfig,
+        true
+    );
+
+    Assertions.assertEquals(
+        supervisorId,
+        cluster.callApi().postSupervisor(kafkaSupervisorSpec)
+    );
+
+    overlord.latchableEmitter().waitForEventAggregate(
+        event -> event.hasMetricName("task/autoScaler/scaleActionTime"),
+        agg -> agg.hasSumAtLeast(2)
+    );
+
+    indexer.latchableEmitter().waitForEventAggregate(
+        event -> event.hasMetricName("ingest/handoff/count"),
+        agg -> agg.hasSumAtLeast(expectedSegmentsHandedOff)
+    );
+
+    final int numSegments = Integer.parseInt(
+        cluster.runSql("SELECT COUNT(*) FROM sys.segments WHERE datasource = '%s'", dataSource)
+    );
+    Assertions.assertTrue(numSegments >= expectedSegmentsHandedOff);
+
+    final int numRows = Integer.parseInt(
+        cluster.runSql("SELECT COUNT(*) FROM %s", dataSource)
+    );

Review Comment:
   ## Missing catch of NumberFormatException
   
   Potential uncaught 'java.lang.NumberFormatException'.
   
   [Show more details](https://github.com/apache/druid/security/code-scanning/4798)



##########
embedded-tests/src/test/java/org/apache/druid/testing/embedded/indexing/KafkaClusterMetricsTest.java:
##########
@@ -175,6 +181,73 @@
     cluster.callApi().postSupervisor(kafkaSupervisorSpec.createSuspendedSpec());
   }
 
+  @Test
+  @Timeout(60)
+  public void test_ingest50kRows_ofSelfClusterMetricsWithScaleOuts_andVerifyValues()
+  {
+    final int maxRowsPerSegment = 1000;
+    final int expectedSegmentsHandedOff = 50;
+
+    final int taskCount = 1;
+
+    // Submit and start a supervisor
+    final String supervisorId = dataSource + "_supe";
+    AutoScalerConfig autoScalerConfig = new LagBasedAutoScalerConfigBuilder()
+        .withLagCollectionIntervalMillis(100)
+        .withLagCollectionRangeMillis(100)
+        .withEnableTaskAutoScaler(true)
+        .withScaleActionPeriodMillis(5000)
+        .withScaleActionStartDelayMillis(10000)
+        .withScaleOutThreshold(0)
+        .withScaleInThreshold(10000)
+        .withTriggerScaleOutFractionThreshold(0.001)
+        .withTriggerScaleInFractionThreshold(0.1)
+        .withTaskCountMax(3)
+        .withTaskCountMin(taskCount)
+        .withScaleOutStep(1)
+        .withScaleInStep(0)
+        .withMinTriggerScaleActionFrequencyMillis(5000)
+        .withStopTaskCountRatio(1.0)
+        .build();
+
+    final KafkaSupervisorSpec kafkaSupervisorSpec = createKafkaSupervisor(
+        supervisorId,
+        taskCount,
+        maxRowsPerSegment,
+        autoScalerConfig,
+        true
+    );
+
+    Assertions.assertEquals(
+        supervisorId,
+        cluster.callApi().postSupervisor(kafkaSupervisorSpec)
+    );
+    overlord.latchableEmitter().waitForEventAggregate(
+        event -> event.hasMetricName("task/autoScaler/scaleActionTime"),
+        agg -> agg.hasSumAtLeast(2)
+    );
+
+    indexer.latchableEmitter().waitForEventAggregate(
+        event -> event.hasMetricName("ingest/handoff/count"),
+        agg -> agg.hasSumAtLeast(expectedSegmentsHandedOff)
+    );
+
+    final int numSegments = Integer.parseInt(
+        cluster.runSql("SELECT COUNT(*) FROM sys.segments WHERE datasource = '%s'", dataSource)
+    );
+    Assertions.assertTrue(numSegments >= expectedSegmentsHandedOff);
+
+    final int numRows = Integer.parseInt(
+        cluster.runSql("SELECT COUNT(*) FROM %s", dataSource)
+    );

Review Comment:
   ## Missing catch of NumberFormatException
   
   Potential uncaught 'java.lang.NumberFormatException'.
   
   [Show more details](https://github.com/apache/druid/security/code-scanning/4780)



##########
embedded-tests/src/test/java/org/apache/druid/testing/embedded/indexing/KafkaClusterMetricsTest.java:
##########
@@ -175,6 +181,73 @@
     cluster.callApi().postSupervisor(kafkaSupervisorSpec.createSuspendedSpec());
   }
 
+  @Test
+  @Timeout(60)
+  public void test_ingest50kRows_ofSelfClusterMetricsWithScaleOuts_andVerifyValues()
+  {
+    final int maxRowsPerSegment = 1000;
+    final int expectedSegmentsHandedOff = 50;
+
+    final int taskCount = 1;
+
+    // Submit and start a supervisor
+    final String supervisorId = dataSource + "_supe";
+    AutoScalerConfig autoScalerConfig = new LagBasedAutoScalerConfigBuilder()
+        .withLagCollectionIntervalMillis(100)
+        .withLagCollectionRangeMillis(100)
+        .withEnableTaskAutoScaler(true)
+        .withScaleActionPeriodMillis(5000)
+        .withScaleActionStartDelayMillis(10000)
+        .withScaleOutThreshold(0)
+        .withScaleInThreshold(10000)
+        .withTriggerScaleOutFractionThreshold(0.001)
+        .withTriggerScaleInFractionThreshold(0.1)
+        .withTaskCountMax(3)
+        .withTaskCountMin(taskCount)
+        .withScaleOutStep(1)
+        .withScaleInStep(0)
+        .withMinTriggerScaleActionFrequencyMillis(5000)
+        .withStopTaskCountRatio(1.0)
+        .build();
+
+    final KafkaSupervisorSpec kafkaSupervisorSpec = createKafkaSupervisor(
+        supervisorId,
+        taskCount,
+        maxRowsPerSegment,
+        autoScalerConfig,
+        true
+    );
+
+    Assertions.assertEquals(
+        supervisorId,
+        cluster.callApi().postSupervisor(kafkaSupervisorSpec)
+    );
+    overlord.latchableEmitter().waitForEventAggregate(
+        event -> event.hasMetricName("task/autoScaler/scaleActionTime"),
+        agg -> agg.hasSumAtLeast(2)
+    );
+
+    indexer.latchableEmitter().waitForEventAggregate(
+        event -> event.hasMetricName("ingest/handoff/count"),
+        agg -> agg.hasSumAtLeast(expectedSegmentsHandedOff)
+    );
+
+    final int numSegments = Integer.parseInt(
+        cluster.runSql("SELECT COUNT(*) FROM sys.segments WHERE datasource = '%s'", dataSource)
+    );

Review Comment:
   ## Missing catch of NumberFormatException
   
   Potential uncaught 'java.lang.NumberFormatException'.
   
   [Show more details](https://github.com/apache/druid/security/code-scanning/4779)



##########
extensions-core/kafka-indexing-service/src/main/java/org/apache/druid/indexing/kafka/supervisor/KafkaSupervisor.java:
##########
@@ -226,6 +257,70 @@
     );
   }
 
+  @Override
+  protected SeekableStreamIndexTaskIOConfig<KafkaTopicPartition, Long> createUpdatedTaskIoConfig(
+      Set<KafkaTopicPartition> partitions,
+      TaskGroup existingTaskGroup,
+      Map<KafkaTopicPartition, Long> latestCommittedOffsets,
+      Map<KafkaTopicPartition, Long> latestTaskOffsetsOnPause
+  )
+  {
+    log.info("Creating updated task IO config for task group [%s]", 
existingTaskGroup.getId());
+    Map<KafkaTopicPartition, Long> startingSequences = new HashMap<>();
+    Set<KafkaTopicPartition> exclusiveStartSequenceNumberPartitions = new 
HashSet<>();
+
+    for (KafkaTopicPartition partition : partitions) {
+      Long offset = Math.max(
+          latestTaskOffsetsOnPause.getOrDefault(partition, 0L),
+          latestCommittedOffsets.getOrDefault(partition, 0L)
+      );
+      startingSequences.put(partition, offset);
+    }
+
+    SeekableStreamStartSequenceNumbers<KafkaTopicPartition, Long> startSequenceNumbers =
+        new SeekableStreamStartSequenceNumbers<>(
+            spec.getIoConfig().getStream(),
+            startingSequences,
+            exclusiveStartSequenceNumberPartitions
+        );
+
+    // For end sequences, use NOT_SET to indicate open-ended reading
+    Map<KafkaTopicPartition, Long> endingSequences = new HashMap<>();
+    for (KafkaTopicPartition partition : partitions) {
+      endingSequences.put(partition, END_OF_PARTITION);
+    }
+
+    SeekableStreamEndSequenceNumbers<KafkaTopicPartition, Long> endSequenceNumbers =
+        new SeekableStreamEndSequenceNumbers<>(
+            spec.getIoConfig().getStream(),

Review Comment:
   ## Deprecated method or constructor invocation
   
   Invoking [KafkaSupervisorSpec.getIoConfig](1) should be avoided because it has been deprecated.
   
   [Show more details](https://github.com/apache/druid/security/code-scanning/4783)
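   
   If no non-deprecated accessor exists yet, one low-churn option is to fetch the IO config once and reference the local everywhere below, so the deprecated call appears in a single place that is easy to swap out later. A sketch only, assuming `getIoConfig()` returns a `KafkaSupervisorIOConfig`:
   
   ```java
   // Sketch: consolidate the repeated deprecated calls into one local variable.
   // If KafkaSupervisorSpec gains a non-deprecated accessor, only this line changes.
   final KafkaSupervisorIOConfig ioConfig = spec.getIoConfig();
   
   SeekableStreamStartSequenceNumbers<KafkaTopicPartition, Long> startSequenceNumbers =
       new SeekableStreamStartSequenceNumbers<>(
           ioConfig.getStream(),
           startingSequences,
           exclusiveStartSequenceNumberPartitions
       );
   ```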



##########
extensions-core/kafka-indexing-service/src/main/java/org/apache/druid/indexing/kafka/supervisor/KafkaSupervisor.java:
##########
@@ -226,6 +257,70 @@
     );
   }
 
+  @Override
+  protected SeekableStreamIndexTaskIOConfig<KafkaTopicPartition, Long> createUpdatedTaskIoConfig(
+      Set<KafkaTopicPartition> partitions,
+      TaskGroup existingTaskGroup,
+      Map<KafkaTopicPartition, Long> latestCommittedOffsets,
+      Map<KafkaTopicPartition, Long> latestTaskOffsetsOnPause
+  )
+  {
+    log.info("Creating updated task IO config for task group [%s]", 
existingTaskGroup.getId());
+    Map<KafkaTopicPartition, Long> startingSequences = new HashMap<>();
+    Set<KafkaTopicPartition> exclusiveStartSequenceNumberPartitions = new 
HashSet<>();
+
+    for (KafkaTopicPartition partition : partitions) {
+      Long offset = Math.max(
+          latestTaskOffsetsOnPause.getOrDefault(partition, 0L),
+          latestCommittedOffsets.getOrDefault(partition, 0L)
+      );
+      startingSequences.put(partition, offset);
+    }
+
+    SeekableStreamStartSequenceNumbers<KafkaTopicPartition, Long> startSequenceNumbers =
+        new SeekableStreamStartSequenceNumbers<>(
+            spec.getIoConfig().getStream(),

Review Comment:
   ## Deprecated method or constructor invocation
   
   Invoking [KafkaSupervisorSpec.getIoConfig](1) should be avoided because it has been deprecated.
   
   [Show more details](https://github.com/apache/druid/security/code-scanning/4782)



##########
indexing-service/src/main/java/org/apache/druid/indexing/seekablestream/supervisor/SeekableStreamSupervisor.java:
##########
@@ -1644,11 +2003,11 @@
       }
     }
 
-    SeekableStreamIndexTaskTuningConfig ss = spec.getSpec().getTuningConfig().convertToTaskTuningConfig();
+    SeekableStreamIndexTaskTuningConfig ss = spec.getSpec().getTuningConfig().convertToTaskTuningConfig(spec.usePerpetuallyRunningTasks());

Review Comment:
   ## Unread local variable
   
   Variable 'SeekableStreamIndexTaskTuningConfig ss' is never read.
   
   [Show more details](https://github.com/apache/druid/security/code-scanning/4796)
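   
   A hedged sketch of the two usual fixes, assuming the conversion call has no side effects the surrounding code relies on:
   
   ```java
   // Option 1: if nothing below reads 'ss', delete the assignment and the call.
   
   // Option 2: if the converted tuning config is actually needed, pass it to its
   // consumer instead of discarding it ('useTaskTuningConfig' is hypothetical).
   final SeekableStreamIndexTaskTuningConfig taskTuningConfig =
       spec.getSpec().getTuningConfig().convertToTaskTuningConfig(spec.usePerpetuallyRunningTasks());
   useTaskTuningConfig(taskTuningConfig);
   ```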



##########
extensions-core/kafka-indexing-service/src/main/java/org/apache/druid/indexing/kafka/supervisor/KafkaSupervisor.java:
##########
@@ -226,6 +257,70 @@
     );
   }
 
+  @Override
+  protected SeekableStreamIndexTaskIOConfig<KafkaTopicPartition, Long> createUpdatedTaskIoConfig(
+      Set<KafkaTopicPartition> partitions,
+      TaskGroup existingTaskGroup,
+      Map<KafkaTopicPartition, Long> latestCommittedOffsets,
+      Map<KafkaTopicPartition, Long> latestTaskOffsetsOnPause
+  )
+  {
+    log.info("Creating updated task IO config for task group [%s]", 
existingTaskGroup.getId());
+    Map<KafkaTopicPartition, Long> startingSequences = new HashMap<>();
+    Set<KafkaTopicPartition> exclusiveStartSequenceNumberPartitions = new 
HashSet<>();
+
+    for (KafkaTopicPartition partition : partitions) {
+      Long offset = Math.max(
+          latestTaskOffsetsOnPause.getOrDefault(partition, 0L),
+          latestCommittedOffsets.getOrDefault(partition, 0L)
+      );
+      startingSequences.put(partition, offset);
+    }
+
+    SeekableStreamStartSequenceNumbers<KafkaTopicPartition, Long> startSequenceNumbers =
+        new SeekableStreamStartSequenceNumbers<>(
+            spec.getIoConfig().getStream(),
+            startingSequences,
+            exclusiveStartSequenceNumberPartitions
+        );
+
+    // For end sequences, use NOT_SET to indicate open-ended reading
+    Map<KafkaTopicPartition, Long> endingSequences = new HashMap<>();
+    for (KafkaTopicPartition partition : partitions) {
+      endingSequences.put(partition, END_OF_PARTITION);
+    }
+
+    SeekableStreamEndSequenceNumbers<KafkaTopicPartition, Long> endSequenceNumbers =
+        new SeekableStreamEndSequenceNumbers<>(
+            spec.getIoConfig().getStream(),
+            endingSequences
+        );
+
+    log.info(
+        "Created updated IOConfig with starting sequences [%s] for partitions 
[%s]",
+        startingSequences, partitions
+    );
+
+    // Create the updated IOConfig
+    return new KafkaIndexTaskIOConfig(
+        existingTaskGroup.getId(),
+        existingTaskGroup.getBaseSequenceName(),
+        null,
+        null,
+        startSequenceNumbers,
+        endSequenceNumbers,
+        spec.getIoConfig().getConsumerProperties(),
+        spec.getIoConfig().getPollTimeout(),
+        true,
+        existingTaskGroup.getMinimumMessageTime(),
+        existingTaskGroup.getMaximumMessageTime(),
+        spec.getIoConfig().getInputFormat(),
+        spec.getIoConfig().getConfigOverrides(),
+        spec.getIoConfig().isMultiTopic(),

Review Comment:
   ## Deprecated method or constructor invocation
   
   Invoking [KafkaSupervisorSpec.getIoConfig](1) should be avoided because it has been deprecated.
   
   [Show more details](https://github.com/apache/druid/security/code-scanning/4788)



##########
extensions-core/kafka-indexing-service/src/main/java/org/apache/druid/indexing/kafka/supervisor/KafkaSupervisor.java:
##########
@@ -226,6 +257,70 @@
     );
   }
 
+  @Override
+  protected SeekableStreamIndexTaskIOConfig<KafkaTopicPartition, Long> createUpdatedTaskIoConfig(
+      Set<KafkaTopicPartition> partitions,
+      TaskGroup existingTaskGroup,
+      Map<KafkaTopicPartition, Long> latestCommittedOffsets,
+      Map<KafkaTopicPartition, Long> latestTaskOffsetsOnPause
+  )
+  {
+    log.info("Creating updated task IO config for task group [%s]", 
existingTaskGroup.getId());
+    Map<KafkaTopicPartition, Long> startingSequences = new HashMap<>();
+    Set<KafkaTopicPartition> exclusiveStartSequenceNumberPartitions = new 
HashSet<>();
+
+    for (KafkaTopicPartition partition : partitions) {
+      Long offset = Math.max(
+          latestTaskOffsetsOnPause.getOrDefault(partition, 0L),
+          latestCommittedOffsets.getOrDefault(partition, 0L)
+      );
+      startingSequences.put(partition, offset);
+    }
+
+    SeekableStreamStartSequenceNumbers<KafkaTopicPartition, Long> startSequenceNumbers =
+        new SeekableStreamStartSequenceNumbers<>(
+            spec.getIoConfig().getStream(),
+            startingSequences,
+            exclusiveStartSequenceNumberPartitions
+        );
+
+    // For end sequences, use NOT_SET to indicate open-ended reading
+    Map<KafkaTopicPartition, Long> endingSequences = new HashMap<>();
+    for (KafkaTopicPartition partition : partitions) {
+      endingSequences.put(partition, END_OF_PARTITION);
+    }
+
+    SeekableStreamEndSequenceNumbers<KafkaTopicPartition, Long> endSequenceNumbers =
+        new SeekableStreamEndSequenceNumbers<>(
+            spec.getIoConfig().getStream(),
+            endingSequences
+        );
+
+    log.info(
+        "Created updated IOConfig with starting sequences [%s] for partitions 
[%s]",
+        startingSequences, partitions
+    );
+
+    // Create the updated IOConfig
+    return new KafkaIndexTaskIOConfig(
+        existingTaskGroup.getId(),
+        existingTaskGroup.getBaseSequenceName(),
+        null,
+        null,
+        startSequenceNumbers,
+        endSequenceNumbers,
+        spec.getIoConfig().getConsumerProperties(),

Review Comment:
   ## Deprecated method or constructor invocation
   
   Invoking [KafkaSupervisorSpec.getIoConfig](1) should be avoided because it has been deprecated.
   
   [Show more details](https://github.com/apache/druid/security/code-scanning/4784)



##########
extensions-core/kafka-indexing-service/src/main/java/org/apache/druid/indexing/kafka/supervisor/KafkaSupervisor.java:
##########
@@ -226,6 +257,70 @@
     );
   }
 
+  @Override
+  protected SeekableStreamIndexTaskIOConfig<KafkaTopicPartition, Long> createUpdatedTaskIoConfig(
+      Set<KafkaTopicPartition> partitions,
+      TaskGroup existingTaskGroup,
+      Map<KafkaTopicPartition, Long> latestCommittedOffsets,
+      Map<KafkaTopicPartition, Long> latestTaskOffsetsOnPause
+  )
+  {
+    log.info("Creating updated task IO config for task group [%s]", 
existingTaskGroup.getId());
+    Map<KafkaTopicPartition, Long> startingSequences = new HashMap<>();
+    Set<KafkaTopicPartition> exclusiveStartSequenceNumberPartitions = new 
HashSet<>();
+
+    for (KafkaTopicPartition partition : partitions) {
+      Long offset = Math.max(
+          latestTaskOffsetsOnPause.getOrDefault(partition, 0L),
+          latestCommittedOffsets.getOrDefault(partition, 0L)
+      );
+      startingSequences.put(partition, offset);
+    }
+
+    SeekableStreamStartSequenceNumbers<KafkaTopicPartition, Long> startSequenceNumbers =
+        new SeekableStreamStartSequenceNumbers<>(
+            spec.getIoConfig().getStream(),
+            startingSequences,
+            exclusiveStartSequenceNumberPartitions
+        );
+
+    // For end sequences, use NOT_SET to indicate open-ended reading
+    Map<KafkaTopicPartition, Long> endingSequences = new HashMap<>();
+    for (KafkaTopicPartition partition : partitions) {
+      endingSequences.put(partition, END_OF_PARTITION);
+    }
+
+    SeekableStreamEndSequenceNumbers<KafkaTopicPartition, Long> endSequenceNumbers =
+        new SeekableStreamEndSequenceNumbers<>(
+            spec.getIoConfig().getStream(),
+            endingSequences
+        );
+
+    log.info(
+        "Created updated IOConfig with starting sequences [%s] for partitions 
[%s]",
+        startingSequences, partitions
+    );
+
+    // Create the updated IOConfig
+    return new KafkaIndexTaskIOConfig(
+        existingTaskGroup.getId(),
+        existingTaskGroup.getBaseSequenceName(),
+        null,
+        null,
+        startSequenceNumbers,
+        endSequenceNumbers,
+        spec.getIoConfig().getConsumerProperties(),
+        spec.getIoConfig().getPollTimeout(),
+        true,
+        existingTaskGroup.getMinimumMessageTime(),
+        existingTaskGroup.getMaximumMessageTime(),
+        spec.getIoConfig().getInputFormat(),
+        spec.getIoConfig().getConfigOverrides(),

Review Comment:
   ## Deprecated method or constructor invocation
   
   Invoking [KafkaSupervisorSpec.getIoConfig](1) should be avoided because it has been deprecated.
   
   [Show more details](https://github.com/apache/druid/security/code-scanning/4787)



##########
embedded-tests/src/test/java/org/apache/druid/testing/embedded/indexing/KafkaClusterMetricsTest.java:
##########
@@ -292,6 +366,74 @@
     cluster.callApi().postSupervisor(kafkaSupervisorSpec.createSuspendedSpec());
   }
 
+  @Test
+  @Timeout(60)
+  public void test_ingest50kRows_ofSelfClusterMetricsWithScaleIns_andVerifyValues()
+  {
+    final int maxRowsPerSegment = 1000;
+    final int expectedSegmentsHandedOff = 50;
+
+    final int initialTaskCount = 3;
+
+    // Submit and start a supervisor with scale-in configuration
+    final String supervisorId = dataSource + "_supe";
+    AutoScalerConfig autoScalerConfig = new LagBasedAutoScalerConfigBuilder()
+        .withLagCollectionIntervalMillis(500)
+        .withLagCollectionRangeMillis(1000)
+        .withEnableTaskAutoScaler(true)
+        .withScaleActionPeriodMillis(10000)
+        .withScaleActionStartDelayMillis(5000)
+        .withScaleOutThreshold(10000)
+        .withScaleInThreshold(1)
+        .withTriggerScaleOutFractionThreshold(0.9)
+        .withTriggerScaleInFractionThreshold(0.001)
+        .withTaskCountMax(initialTaskCount)
+        .withTaskCountStart(initialTaskCount)
+        .withScaleOutStep(0)
+        .withScaleInStep(1)
+        .withMinTriggerScaleActionFrequencyMillis(10000)
+        .withStopTaskCountRatio(1.0)
+        .build();
+
+    final KafkaSupervisorSpec kafkaSupervisorSpec = createKafkaSupervisor(
+        supervisorId,
+        initialTaskCount,
+        maxRowsPerSegment,
+        autoScalerConfig,
+        true
+    );
+
+    Assertions.assertEquals(
+        supervisorId,
+        cluster.callApi().postSupervisor(kafkaSupervisorSpec)
+    );
+
+    overlord.latchableEmitter().waitForEventAggregate(
+        event -> event.hasMetricName("task/autoScaler/scaleActionTime"),
+        agg -> agg.hasSumAtLeast(2)
+    );
+
+    indexer.latchableEmitter().waitForEventAggregate(
+        event -> event.hasMetricName("ingest/handoff/count"),
+        agg -> agg.hasSumAtLeast(expectedSegmentsHandedOff)
+    );
+
+    final int numSegments = Integer.parseInt(
+        cluster.runSql("SELECT COUNT(*) FROM sys.segments WHERE datasource = '%s'", dataSource)
+    );

Review Comment:
   ## Missing catch of NumberFormatException
   
   Potential uncaught 'java.lang.NumberFormatException'.
   
   [Show more details](https://github.com/apache/druid/security/code-scanning/4797)



##########
extensions-core/kafka-indexing-service/src/main/java/org/apache/druid/indexing/kafka/supervisor/KafkaSupervisor.java:
##########
@@ -226,6 +257,70 @@
     );
   }
 
+  @Override
+  protected SeekableStreamIndexTaskIOConfig<KafkaTopicPartition, Long> createUpdatedTaskIoConfig(
+      Set<KafkaTopicPartition> partitions,
+      TaskGroup existingTaskGroup,
+      Map<KafkaTopicPartition, Long> latestCommittedOffsets,
+      Map<KafkaTopicPartition, Long> latestTaskOffsetsOnPause
+  )
+  {
+    log.info("Creating updated task IO config for task group [%s]", 
existingTaskGroup.getId());
+    Map<KafkaTopicPartition, Long> startingSequences = new HashMap<>();
+    Set<KafkaTopicPartition> exclusiveStartSequenceNumberPartitions = new 
HashSet<>();
+
+    for (KafkaTopicPartition partition : partitions) {
+      Long offset = Math.max(
+          latestTaskOffsetsOnPause.getOrDefault(partition, 0L),
+          latestCommittedOffsets.getOrDefault(partition, 0L)
+      );
+      startingSequences.put(partition, offset);
+    }
+
+    SeekableStreamStartSequenceNumbers<KafkaTopicPartition, Long> startSequenceNumbers =
+        new SeekableStreamStartSequenceNumbers<>(
+            spec.getIoConfig().getStream(),
+            startingSequences,
+            exclusiveStartSequenceNumberPartitions
+        );
+
+    // For end sequences, use NOT_SET to indicate open-ended reading
+    Map<KafkaTopicPartition, Long> endingSequences = new HashMap<>();
+    for (KafkaTopicPartition partition : partitions) {
+      endingSequences.put(partition, END_OF_PARTITION);
+    }
+
+    SeekableStreamEndSequenceNumbers<KafkaTopicPartition, Long> endSequenceNumbers =
+        new SeekableStreamEndSequenceNumbers<>(
+            spec.getIoConfig().getStream(),
+            endingSequences
+        );
+
+    log.info(
+        "Created updated IOConfig with starting sequences [%s] for partitions 
[%s]",
+        startingSequences, partitions
+    );
+
+    // Create the updated IOConfig
+    return new KafkaIndexTaskIOConfig(
+        existingTaskGroup.getId(),
+        existingTaskGroup.getBaseSequenceName(),
+        null,
+        null,
+        startSequenceNumbers,
+        endSequenceNumbers,
+        spec.getIoConfig().getConsumerProperties(),
+        spec.getIoConfig().getPollTimeout(),

Review Comment:
   ## Deprecated method or constructor invocation
   
   Invoking [KafkaSupervisorSpec.getIoConfig](1) should be avoided because it has been deprecated.
   
   [Show more details](https://github.com/apache/druid/security/code-scanning/4785)



##########
extensions-core/kafka-indexing-service/src/main/java/org/apache/druid/indexing/kafka/supervisor/KafkaSupervisor.java:
##########
@@ -226,6 +257,70 @@
     );
   }
 
+  @Override
+  protected SeekableStreamIndexTaskIOConfig<KafkaTopicPartition, Long> createUpdatedTaskIoConfig(
+      Set<KafkaTopicPartition> partitions,
+      TaskGroup existingTaskGroup,
+      Map<KafkaTopicPartition, Long> latestCommittedOffsets,
+      Map<KafkaTopicPartition, Long> latestTaskOffsetsOnPause
+  )
+  {
+    log.info("Creating updated task IO config for task group [%s]", 
existingTaskGroup.getId());
+    Map<KafkaTopicPartition, Long> startingSequences = new HashMap<>();
+    Set<KafkaTopicPartition> exclusiveStartSequenceNumberPartitions = new 
HashSet<>();
+
+    for (KafkaTopicPartition partition : partitions) {
+      Long offset = Math.max(
+          latestTaskOffsetsOnPause.getOrDefault(partition, 0L),
+          latestCommittedOffsets.getOrDefault(partition, 0L)
+      );
+      startingSequences.put(partition, offset);
+    }
+
+    SeekableStreamStartSequenceNumbers<KafkaTopicPartition, Long> startSequenceNumbers =
+        new SeekableStreamStartSequenceNumbers<>(
+            spec.getIoConfig().getStream(),
+            startingSequences,
+            exclusiveStartSequenceNumberPartitions
+        );
+
+    // For end sequences, use NOT_SET to indicate open-ended reading
+    Map<KafkaTopicPartition, Long> endingSequences = new HashMap<>();
+    for (KafkaTopicPartition partition : partitions) {
+      endingSequences.put(partition, END_OF_PARTITION);
+    }
+
+    SeekableStreamEndSequenceNumbers<KafkaTopicPartition, Long> endSequenceNumbers =
+        new SeekableStreamEndSequenceNumbers<>(
+            spec.getIoConfig().getStream(),
+            endingSequences
+        );
+
+    log.info(
+        "Created updated IOConfig with starting sequences [%s] for partitions 
[%s]",
+        startingSequences, partitions
+    );
+
+    // Create the updated IOConfig
+    return new KafkaIndexTaskIOConfig(
+        existingTaskGroup.getId(),
+        existingTaskGroup.getBaseSequenceName(),
+        null,
+        null,
+        startSequenceNumbers,
+        endSequenceNumbers,
+        spec.getIoConfig().getConsumerProperties(),
+        spec.getIoConfig().getPollTimeout(),
+        true,
+        existingTaskGroup.getMinimumMessageTime(),
+        existingTaskGroup.getMaximumMessageTime(),
+        spec.getIoConfig().getInputFormat(),
+        spec.getIoConfig().getConfigOverrides(),
+        spec.getIoConfig().isMultiTopic(),
+        spec.getIoConfig().getTaskDuration().getStandardMinutes()

Review Comment:
   ## Deprecated method or constructor invocation
   
   Invoking [KafkaSupervisorSpec.getIoConfig](1) should be avoided because it has been deprecated.
   
   [Show more details](https://github.com/apache/druid/security/code-scanning/4789)



##########
indexing-service/src/test/java/org/apache/druid/indexing/seekablestream/common/OrderedSequenceNumberTest.java:
##########
@@ -0,0 +1,211 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.druid.indexing.seekablestream.common;
+
+import org.junit.Assert;
+import org.junit.Test;
+
+import javax.validation.constraints.NotNull;
+
+public class OrderedSequenceNumberTest
+{
+  @Test
+  public void test_isMoreToReadBeforeReadingRecord_exclusiveEnd_lessThan()
+  {
+    TestSequenceNumber current = new TestSequenceNumber(5L, false);
+    TestSequenceNumber end = new TestSequenceNumber(10L, false);
+
+    Assert.assertTrue("Should have more to read when current < end with 
exclusive end",
+                     current.isMoreToReadBeforeReadingRecord(end, true));
+  }
+
+  @Test
+  public void test_isMoreToReadBeforeReadingRecord_exclusiveEnd_equalTo()
+  {
+    TestSequenceNumber current = new TestSequenceNumber(10L, false);
+    TestSequenceNumber end = new TestSequenceNumber(10L, false);
+
+    Assert.assertFalse("Should NOT have more to read when current == end with 
exclusive end",
+                      current.isMoreToReadBeforeReadingRecord(end, true));
+  }
+
+  @Test
+  public void testIsMoreToReadBeforeReadingRecord_exclusiveEnd_greaterThan()
+  {
+    TestSequenceNumber current = new TestSequenceNumber(15L, false);
+    TestSequenceNumber end = new TestSequenceNumber(10L, false);
+
+    Assert.assertFalse("Should NOT have more to read when current > end with 
exclusive end",
+                      current.isMoreToReadBeforeReadingRecord(end, true));
+  }
+
+  @Test
+  public void testIsMoreToReadBeforeReadingRecord_inclusiveEnd_lessThan()
+  {
+    TestSequenceNumber current = new TestSequenceNumber(5L, false);
+    TestSequenceNumber end = new TestSequenceNumber(10L, false);
+
+    Assert.assertTrue("Should have more to read when current < end with 
inclusive end",
+                     current.isMoreToReadBeforeReadingRecord(end, false));
+  }
+
+  @Test
+  public void testIsMoreToReadBeforeReadingRecord_inclusiveEnd_equalTo()
+  {
+    TestSequenceNumber current = new TestSequenceNumber(10L, false);
+    TestSequenceNumber end = new TestSequenceNumber(10L, false);
+
+    Assert.assertTrue("Should have more to read when current == end with 
inclusive end",
+                     current.isMoreToReadBeforeReadingRecord(end, false));
+  }
+
+  @Test
+  public void testIsMoreToReadBeforeReadingRecord_inclusiveEnd_greaterThan()
+  {
+    TestSequenceNumber current = new TestSequenceNumber(15L, false);
+    TestSequenceNumber end = new TestSequenceNumber(10L, false);
+
+    Assert.assertFalse("Should NOT have more to read when current > end with 
inclusive end",
+                      current.isMoreToReadBeforeReadingRecord(end, false));
+  }
+
+  @Test
+  public void testIsMoreToReadBeforeReadingRecord_nullEndSequenceNumber_exclusiveEnd()
+  {
+    TestSequenceNumber current = new TestSequenceNumber(5L, false);
+    TestSequenceNumber end = new TestSequenceNumber(null, false);
+
+    Assert.assertFalse("Should return false when end sequence number is null",
+                      current.isMoreToReadBeforeReadingRecord(end, true));
+  }
+
+  @Test
+  public void testIsMoreToReadBeforeReadingRecord_nullEndSequenceNumber_inclusiveEnd()
+  {
+    TestSequenceNumber current = new TestSequenceNumber(5L, false);
+    TestSequenceNumber end = new TestSequenceNumber(null, false);
+
+    Assert.assertFalse("Should return false when end sequence number is null",
+                      current.isMoreToReadBeforeReadingRecord(end, false));
+  }
+
+  @Test
+  public void testIsMoreToReadBeforeReadingRecord_nullCurrentSequenceNumber_exclusiveEnd()
+  {
+    TestSequenceNumber current = new TestSequenceNumber(null, false);
+    TestSequenceNumber end = new TestSequenceNumber(10L, false);
+
+    Assert.assertFalse("Should return false when current sequence number is 
null",
+                      current.isMoreToReadBeforeReadingRecord(end, true));
+  }
+
+  @Test
+  public void testIsMoreToReadBeforeReadingRecord_nullCurrentSequenceNumber_inclusiveEnd()
+  {
+    TestSequenceNumber current = new TestSequenceNumber(null, false);
+    TestSequenceNumber end = new TestSequenceNumber(10L, false);
+
+    Assert.assertFalse("Should return false when current sequence number is 
null",
+                      current.isMoreToReadBeforeReadingRecord(end, false));
+  }
+
+  @Test
+  public void testIsMoreToReadBeforeReadingRecord_bothNull()
+  {
+    TestSequenceNumber current = new TestSequenceNumber(null, false);
+    TestSequenceNumber end = new TestSequenceNumber(null, false);
+
+    Assert.assertFalse("Should return false when both sequence numbers are 
null",
+                      current.isMoreToReadBeforeReadingRecord(end, true));
+    Assert.assertFalse("Should return false when both sequence numbers are 
null",
+                      current.isMoreToReadBeforeReadingRecord(end, false));
+  }
+
+  @Test
+  public void testIsMoreToReadBeforeReadingRecord_exceptionHandling()
+  {
+    TestExceptionSequenceNumber current = new TestExceptionSequenceNumber(5L, false);
+    TestExceptionSequenceNumber end = new TestExceptionSequenceNumber(10L, false);
+
+    Assert.assertFalse("Should return false when comparison throws exception",
+                      current.isMoreToReadBeforeReadingRecord(end, true));
+    Assert.assertFalse("Should return false when comparison throws exception",
+                      current.isMoreToReadBeforeReadingRecord(end, false));
+  }
+
+  @Test
+  public void testIsMoreToReadBeforeReadingRecord_differentExclusivityFlags()
+  {
+    TestSequenceNumber currentExclusive = new TestSequenceNumber(5L, true);
+    TestSequenceNumber currentInclusive = new TestSequenceNumber(5L, false);
+    TestSequenceNumber endExclusive = new TestSequenceNumber(10L, true);
+    TestSequenceNumber endInclusive = new TestSequenceNumber(10L, false);
+
+    // Test different combinations with exclusive end
+    Assert.assertTrue("Should work with different exclusivity flags",
+                     
currentExclusive.isMoreToReadBeforeReadingRecord(endExclusive, true));
+    Assert.assertTrue("Should work with different exclusivity flags",
+                     
currentExclusive.isMoreToReadBeforeReadingRecord(endInclusive, true));
+    Assert.assertTrue("Should work with different exclusivity flags",
+                     
currentInclusive.isMoreToReadBeforeReadingRecord(endExclusive, true));
+
+    // Test different combinations with inclusive end
+    Assert.assertTrue("Should work with different exclusivity flags",
+                     
currentExclusive.isMoreToReadBeforeReadingRecord(endExclusive, false));
+    Assert.assertTrue("Should work with different exclusivity flags",
+                     
currentExclusive.isMoreToReadBeforeReadingRecord(endInclusive, false));
+    Assert.assertTrue("Should work with different exclusivity flags",
+                     
currentInclusive.isMoreToReadBeforeReadingRecord(endExclusive, false));
+  }
+
+  /**
+   * Test implementation of OrderedSequenceNumber for Long values
+   */
+  private static class TestSequenceNumber extends OrderedSequenceNumber<Long>

Review Comment:
   ## Inconsistent compareTo
   
   This class declares [compareTo](1) but inherits equals; the two could be inconsistent.
   
   [Show more details](https://github.com/apache/druid/security/code-scanning/4795)
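   
   For a test helper this is likely benign, but the alert is cheap to silence by keeping `equals`/`hashCode` consistent with `compareTo`. A sketch, assuming `OrderedSequenceNumber#get()` returns the wrapped `Long` (requires `import java.util.Objects;`):
   
   ```java
   // Sketch: two TestSequenceNumbers are equal exactly when compareTo returns 0,
   // i.e. when their wrapped sequence numbers are equal.
   @Override
   public boolean equals(Object o)
   {
     return o instanceof TestSequenceNumber
            && Objects.equals(this.get(), ((TestSequenceNumber) o).get());
   }
   
   @Override
   public int hashCode()
   {
     return Objects.hashCode(this.get());
   }
   ```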



##########
indexing-service/src/test/java/org/apache/druid/indexing/seekablestream/common/OrderedSequenceNumberTest.java:
##########
@@ -0,0 +1,211 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.druid.indexing.seekablestream.common;
+
+import org.junit.Assert;
+import org.junit.Test;
+
+import javax.validation.constraints.NotNull;
+
+public class OrderedSequenceNumberTest
+{
+  @Test
+  public void test_isMoreToReadBeforeReadingRecord_exclusiveEnd_lessThan()
+  {
+    TestSequenceNumber current = new TestSequenceNumber(5L, false);
+    TestSequenceNumber end = new TestSequenceNumber(10L, false);
+
+    Assert.assertTrue("Should have more to read when current < end with 
exclusive end",
+                     current.isMoreToReadBeforeReadingRecord(end, true));
+  }
+
+  @Test
+  public void test_isMoreToReadBeforeReadingRecord_exclusiveEnd_equalTo()
+  {
+    TestSequenceNumber current = new TestSequenceNumber(10L, false);
+    TestSequenceNumber end = new TestSequenceNumber(10L, false);
+
+    Assert.assertFalse("Should NOT have more to read when current == end with 
exclusive end",
+                      current.isMoreToReadBeforeReadingRecord(end, true));
+  }
+
+  @Test
+  public void testIsMoreToReadBeforeReadingRecord_exclusiveEnd_greaterThan()
+  {
+    TestSequenceNumber current = new TestSequenceNumber(15L, false);
+    TestSequenceNumber end = new TestSequenceNumber(10L, false);
+
+    Assert.assertFalse("Should NOT have more to read when current > end with 
exclusive end",
+                      current.isMoreToReadBeforeReadingRecord(end, true));
+  }
+
+  @Test
+  public void testIsMoreToReadBeforeReadingRecord_inclusiveEnd_lessThan()
+  {
+    TestSequenceNumber current = new TestSequenceNumber(5L, false);
+    TestSequenceNumber end = new TestSequenceNumber(10L, false);
+
+    Assert.assertTrue("Should have more to read when current < end with 
inclusive end",
+                     current.isMoreToReadBeforeReadingRecord(end, false));
+  }
+
+  @Test
+  public void testIsMoreToReadBeforeReadingRecord_inclusiveEnd_equalTo()
+  {
+    TestSequenceNumber current = new TestSequenceNumber(10L, false);
+    TestSequenceNumber end = new TestSequenceNumber(10L, false);
+
+    Assert.assertTrue("Should have more to read when current == end with 
inclusive end",
+                     current.isMoreToReadBeforeReadingRecord(end, false));
+  }
+
+  @Test
+  public void testIsMoreToReadBeforeReadingRecord_inclusiveEnd_greaterThan()
+  {
+    TestSequenceNumber current = new TestSequenceNumber(15L, false);
+    TestSequenceNumber end = new TestSequenceNumber(10L, false);
+
+    Assert.assertFalse("Should NOT have more to read when current > end with 
inclusive end",
+                      current.isMoreToReadBeforeReadingRecord(end, false));
+  }
+
+  @Test
+  public void testIsMoreToReadBeforeReadingRecord_nullEndSequenceNumber_exclusiveEnd()
+  {
+    TestSequenceNumber current = new TestSequenceNumber(5L, false);
+    TestSequenceNumber end = new TestSequenceNumber(null, false);
+
+    Assert.assertFalse("Should return false when end sequence number is null",
+                      current.isMoreToReadBeforeReadingRecord(end, true));
+  }
+
+  @Test
+  public void testIsMoreToReadBeforeReadingRecord_nullEndSequenceNumber_inclusiveEnd()
+  {
+    TestSequenceNumber current = new TestSequenceNumber(5L, false);
+    TestSequenceNumber end = new TestSequenceNumber(null, false);
+
+    Assert.assertFalse("Should return false when end sequence number is null",
+                      current.isMoreToReadBeforeReadingRecord(end, false));
+  }
+
+  @Test
+  public void testIsMoreToReadBeforeReadingRecord_nullCurrentSequenceNumber_exclusiveEnd()
+  {
+    TestSequenceNumber current = new TestSequenceNumber(null, false);
+    TestSequenceNumber end = new TestSequenceNumber(10L, false);
+
+    Assert.assertFalse("Should return false when current sequence number is 
null",
+                      current.isMoreToReadBeforeReadingRecord(end, true));
+  }
+
+  @Test
+  public void testIsMoreToReadBeforeReadingRecord_nullCurrentSequenceNumber_inclusiveEnd()
+  {
+    TestSequenceNumber current = new TestSequenceNumber(null, false);
+    TestSequenceNumber end = new TestSequenceNumber(10L, false);
+
+    Assert.assertFalse("Should return false when current sequence number is 
null",
+                      current.isMoreToReadBeforeReadingRecord(end, false));
+  }
+
+  @Test
+  public void testIsMoreToReadBeforeReadingRecord_bothNull()
+  {
+    TestSequenceNumber current = new TestSequenceNumber(null, false);
+    TestSequenceNumber end = new TestSequenceNumber(null, false);
+
+    Assert.assertFalse("Should return false when both sequence numbers are 
null",
+                      current.isMoreToReadBeforeReadingRecord(end, true));
+    Assert.assertFalse("Should return false when both sequence numbers are 
null",
+                      current.isMoreToReadBeforeReadingRecord(end, false));
+  }
+
+  @Test
+  public void testIsMoreToReadBeforeReadingRecord_exceptionHandling()
+  {
+    TestExceptionSequenceNumber current = new TestExceptionSequenceNumber(5L, false);
+    TestExceptionSequenceNumber end = new TestExceptionSequenceNumber(10L, false);
+
+    Assert.assertFalse("Should return false when comparison throws exception",
+                      current.isMoreToReadBeforeReadingRecord(end, true));
+    Assert.assertFalse("Should return false when comparison throws exception",
+                      current.isMoreToReadBeforeReadingRecord(end, false));
+  }
+
+  @Test
+  public void testIsMoreToReadBeforeReadingRecord_differentExclusivityFlags()
+  {
+    TestSequenceNumber currentExclusive = new TestSequenceNumber(5L, true);
+    TestSequenceNumber currentInclusive = new TestSequenceNumber(5L, false);
+    TestSequenceNumber endExclusive = new TestSequenceNumber(10L, true);
+    TestSequenceNumber endInclusive = new TestSequenceNumber(10L, false);
+
+    // Test different combinations with exclusive end
+    Assert.assertTrue("Should work with different exclusivity flags",
+                     
currentExclusive.isMoreToReadBeforeReadingRecord(endExclusive, true));
+    Assert.assertTrue("Should work with different exclusivity flags",
+                     
currentExclusive.isMoreToReadBeforeReadingRecord(endInclusive, true));
+    Assert.assertTrue("Should work with different exclusivity flags",
+                     
currentInclusive.isMoreToReadBeforeReadingRecord(endExclusive, true));
+
+    // Test different combinations with inclusive end
+    Assert.assertTrue("Should work with different exclusivity flags",
+                     
currentExclusive.isMoreToReadBeforeReadingRecord(endExclusive, false));
+    Assert.assertTrue("Should work with different exclusivity flags",
+                     
currentExclusive.isMoreToReadBeforeReadingRecord(endInclusive, false));
+    Assert.assertTrue("Should work with different exclusivity flags",
+                     
currentInclusive.isMoreToReadBeforeReadingRecord(endExclusive, false));
+  }
+
+  /**
+   * Test implementation of OrderedSequenceNumber for Long values
+   */
+  private static class TestSequenceNumber extends OrderedSequenceNumber<Long>
+  {
+    public TestSequenceNumber(Long sequenceNumber, boolean isExclusive)
+    {
+      super(sequenceNumber, isExclusive);
+    }
+
+    @Override
+    public int compareTo(@NotNull OrderedSequenceNumber<Long> o)
+    {
+      return this.get().compareTo(o.get());
+    }
+  }
+
+  /**
+   * Test implementation that throws exceptions on comparison
+   */
+  private static class TestExceptionSequenceNumber extends OrderedSequenceNumber<Long>

Review Comment:
   ## Inconsistent compareTo
   
   This class declares [compareTo](1) but inherits equals; the two could be inconsistent.
   
   [Show more details](https://github.com/apache/druid/security/code-scanning/4794)



##########
extensions-core/kafka-indexing-service/src/main/java/org/apache/druid/indexing/kafka/supervisor/KafkaSupervisor.java:
##########
@@ -226,6 +257,70 @@
     );
   }
 
+  @Override
+  protected SeekableStreamIndexTaskIOConfig<KafkaTopicPartition, Long> createUpdatedTaskIoConfig(
+      Set<KafkaTopicPartition> partitions,
+      TaskGroup existingTaskGroup,
+      Map<KafkaTopicPartition, Long> latestCommittedOffsets,
+      Map<KafkaTopicPartition, Long> latestTaskOffsetsOnPause
+  )
+  {
+    log.info("Creating updated task IO config for task group [%s]", 
existingTaskGroup.getId());
+    Map<KafkaTopicPartition, Long> startingSequences = new HashMap<>();
+    Set<KafkaTopicPartition> exclusiveStartSequenceNumberPartitions = new 
HashSet<>();
+
+    for (KafkaTopicPartition partition : partitions) {
+      Long offset = Math.max(
+          latestTaskOffsetsOnPause.getOrDefault(partition, 0L),
+          latestCommittedOffsets.getOrDefault(partition, 0L)
+      );
+      startingSequences.put(partition, offset);
+    }
+
+    SeekableStreamStartSequenceNumbers<KafkaTopicPartition, Long> startSequenceNumbers =
+        new SeekableStreamStartSequenceNumbers<>(
+            spec.getIoConfig().getStream(),
+            startingSequences,
+            exclusiveStartSequenceNumberPartitions
+        );
+
+    // For end sequences, use NOT_SET to indicate open-ended reading
+    Map<KafkaTopicPartition, Long> endingSequences = new HashMap<>();
+    for (KafkaTopicPartition partition : partitions) {
+      endingSequences.put(partition, END_OF_PARTITION);
+    }
+
+    SeekableStreamEndSequenceNumbers<KafkaTopicPartition, Long> endSequenceNumbers =
+        new SeekableStreamEndSequenceNumbers<>(
+            spec.getIoConfig().getStream(),
+            endingSequences
+        );
+
+    log.info(
+        "Created updated IOConfig with starting sequences [%s] for partitions 
[%s]",
+        startingSequences, partitions
+    );
+
+    // Create the updated IOConfig
+    return new KafkaIndexTaskIOConfig(
+        existingTaskGroup.getId(),
+        existingTaskGroup.getBaseSequenceName(),
+        null,
+        null,
+        startSequenceNumbers,
+        endSequenceNumbers,
+        spec.getIoConfig().getConsumerProperties(),
+        spec.getIoConfig().getPollTimeout(),
+        true,
+        existingTaskGroup.getMinimumMessageTime(),
+        existingTaskGroup.getMaximumMessageTime(),
+        spec.getIoConfig().getInputFormat(),

Review Comment:
   ## Deprecated method or constructor invocation
   
   Invoking [KafkaSupervisorSpec.getIoConfig](1) should be avoided because it has been deprecated.
   
   [Show more details](https://github.com/apache/druid/security/code-scanning/4786)



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: [email protected]

For queries about this service, please contact Infrastructure at:
[email protected]

