kfaraz commented on code in PR #18108:
URL: https://github.com/apache/druid/pull/18108#discussion_r2137656360


##########
server/src/main/java/org/apache/druid/metadata/SQLMetadataConnector.java:
##########
@@ -1013,6 +1013,10 @@ public Void withHandle(Handle handle)
     }
   }
 
+  public void deleteAllSegmentRecords() {

Review Comment:
   Is this needed? At the end of the test, the DB would be torn down anyway.



##########
benchmarks/src/test/java/org/apache/druid/benchmark/indexing/SqlSegmentsMetadataQueryBenchmark.java:
##########
@@ -0,0 +1,95 @@
+package org.apache.druid.benchmark.indexing;
+
+import com.google.common.collect.ImmutableSet;
+import org.apache.druid.java.util.common.DateTimes;
+import org.apache.druid.java.util.common.granularity.Granularities;
+import org.apache.druid.java.util.common.parsers.CloseableIterator;
+import org.apache.druid.metadata.IndexerSqlMetadataStorageCoordinatorTestBase;
+import org.apache.druid.metadata.MetadataStorageTablesConfig;
+import org.apache.druid.metadata.SqlSegmentsMetadataQuery;
+import org.apache.druid.metadata.TestDerbyConnector;
+import org.apache.druid.metadata.storage.derby.DerbyConnector;
+import org.apache.druid.segment.TestDataSource;
+import org.apache.druid.segment.TestHelper;
+import org.apache.druid.server.coordinator.CreateDataSegments;
+import org.apache.druid.timeline.DataSegment;
+import org.joda.time.DateTime;
+import org.joda.time.Interval;
+import org.openjdk.jmh.annotations.*;
+import org.openjdk.jmh.infra.Blackhole;
+
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.TimeUnit;
+import java.util.function.Function;
+
+@State(Scope.Benchmark)
+@Fork(value = 1)
+@Warmup(iterations = 1, time = 1)
+@Measurement(iterations = 20, time = 2)
+@BenchmarkMode({Mode.AverageTime})
+@OutputTimeUnit(TimeUnit.NANOSECONDS)
+public class SqlSegmentsMetadataQueryBenchmark {
+
+  private static final DateTime JAN_1 = DateTimes.of("2025-01-01");
+  private static final String V1 = JAN_1.toString();
+  private static final List<DataSegment> WIKI_SEGMENTS_2X5D
+          = CreateDataSegments.ofDatasource(TestDataSource.WIKI)
+          .forIntervals(10, Granularities.DAY)
+          .withNumPartitions(20)
+          .startingAt(JAN_1)
+          .withVersion(V1)
+          .eachOfSizeInMb(500);
+  @Param({
+          "2025-01-01T00:00:00.000Z/2025-01-02T00:00:00.000Z",
+          "2025-01-09T00:00:00.000Z/2025-01-10T00:00:00.000Z",
+          "2025-01-01T00:00:00.000Z/2025-01-05T00:00:00.000Z",
+          "2025-01-01T00:00:00.000Z/2025-01-10T00:00:00.000Z",
+  })
+  private String queryIntervalStr;
+  private Interval queryInterval;
+  private TestDerbyConnector.DerbyConnectorRule derbyConnectorRule;
+
+  @Setup(Level.Trial)
+  public void setup() throws Exception {
+    this.derbyConnectorRule = new TestDerbyConnector.DerbyConnectorRule();

Review Comment:
   Do we need to use a rule here? A `Rule` is more of a JUnit concept.
   It seems like we could have just used the `TestDerbyConnector` directly; we can add some small utility methods to `TestDerbyConnector` if needed. See the sketch below.
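   Something along these lines might be enough (a rough sketch; the two-arg `TestDerbyConnector` constructor and the exact helper names are assumptions, adjust to whatever the class exposes on this branch):
   ```java
   import com.google.common.base.Suppliers;
   import org.apache.druid.metadata.MetadataStorageConnectorConfig;
   import org.openjdk.jmh.annotations.TearDown;

   private TestDerbyConnector connector;

   @Setup(Level.Trial)
   public void setup()
   {
     // Assumption: this constructor creates a uniquely named in-memory
     // Derby instance, as the rule does internally.
     connector = new TestDerbyConnector(
         Suppliers.ofInstance(new MetadataStorageConnectorConfig()),
         Suppliers.ofInstance(MetadataStorageTablesConfig.fromBase("druid"))
     );
     connector.createSegmentTable();
   }

   @TearDown(Level.Trial)
   public void tearDown()
   {
     // Drops the Derby database; no need to delete segment records first.
     connector.tearDown();
   }
   ```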



##########
benchmarks/src/test/java/org/apache/druid/benchmark/indexing/SqlSegmentsMetadataQueryBenchmark.java:
##########
@@ -0,0 +1,95 @@
+package org.apache.druid.benchmark.indexing;
+
+import com.google.common.collect.ImmutableSet;
+import org.apache.druid.java.util.common.DateTimes;
+import org.apache.druid.java.util.common.granularity.Granularities;
+import org.apache.druid.java.util.common.parsers.CloseableIterator;
+import org.apache.druid.metadata.IndexerSqlMetadataStorageCoordinatorTestBase;
+import org.apache.druid.metadata.MetadataStorageTablesConfig;
+import org.apache.druid.metadata.SqlSegmentsMetadataQuery;
+import org.apache.druid.metadata.TestDerbyConnector;
+import org.apache.druid.metadata.storage.derby.DerbyConnector;
+import org.apache.druid.segment.TestDataSource;
+import org.apache.druid.segment.TestHelper;
+import org.apache.druid.server.coordinator.CreateDataSegments;
+import org.apache.druid.timeline.DataSegment;
+import org.joda.time.DateTime;
+import org.joda.time.Interval;
+import org.openjdk.jmh.annotations.*;
+import org.openjdk.jmh.infra.Blackhole;
+
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.TimeUnit;
+import java.util.function.Function;
+
+@State(Scope.Benchmark)
+@Fork(value = 1)
+@Warmup(iterations = 1, time = 1)
+@Measurement(iterations = 20, time = 2)
+@BenchmarkMode({Mode.AverageTime})
+@OutputTimeUnit(TimeUnit.NANOSECONDS)
+public class SqlSegmentsMetadataQueryBenchmark {

Review Comment:
   Please format this file using Druid style (auto-format in IDEA should do the trick).



##########
benchmarks/src/test/java/org/apache/druid/benchmark/indexing/SqlSegmentsMetadataQueryBenchmark.java:
##########
@@ -0,0 +1,95 @@
+package org.apache.druid.benchmark.indexing;
+
+import com.google.common.collect.ImmutableSet;
+import org.apache.druid.java.util.common.DateTimes;
+import org.apache.druid.java.util.common.granularity.Granularities;
+import org.apache.druid.java.util.common.parsers.CloseableIterator;
+import org.apache.druid.metadata.IndexerSqlMetadataStorageCoordinatorTestBase;
+import org.apache.druid.metadata.MetadataStorageTablesConfig;
+import org.apache.druid.metadata.SqlSegmentsMetadataQuery;
+import org.apache.druid.metadata.TestDerbyConnector;
+import org.apache.druid.metadata.storage.derby.DerbyConnector;
+import org.apache.druid.segment.TestDataSource;
+import org.apache.druid.segment.TestHelper;
+import org.apache.druid.server.coordinator.CreateDataSegments;
+import org.apache.druid.timeline.DataSegment;
+import org.joda.time.DateTime;
+import org.joda.time.Interval;
+import org.openjdk.jmh.annotations.*;

Review Comment:
   Please avoid wildcard imports; list the imports explicitly, as sketched below.
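   For reference, the explicit equivalents for the JMH annotations used in this hunk would be roughly the following (`Benchmark` is assumed to be used by the rest of the file):
   ```java
   import org.openjdk.jmh.annotations.Benchmark;
   import org.openjdk.jmh.annotations.BenchmarkMode;
   import org.openjdk.jmh.annotations.Fork;
   import org.openjdk.jmh.annotations.Level;
   import org.openjdk.jmh.annotations.Measurement;
   import org.openjdk.jmh.annotations.Mode;
   import org.openjdk.jmh.annotations.OutputTimeUnit;
   import org.openjdk.jmh.annotations.Param;
   import org.openjdk.jmh.annotations.Scope;
   import org.openjdk.jmh.annotations.Setup;
   import org.openjdk.jmh.annotations.State;
   import org.openjdk.jmh.annotations.Warmup;
   ```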



##########
benchmarks/src/test/java/org/apache/druid/benchmark/indexing/SqlSegmentsMetadataQueryBenchmark.java:
##########
@@ -0,0 +1,95 @@
+package org.apache.druid.benchmark.indexing;
+
+import com.google.common.collect.ImmutableSet;
+import org.apache.druid.java.util.common.DateTimes;
+import org.apache.druid.java.util.common.granularity.Granularities;
+import org.apache.druid.java.util.common.parsers.CloseableIterator;
+import org.apache.druid.metadata.IndexerSqlMetadataStorageCoordinatorTestBase;
+import org.apache.druid.metadata.MetadataStorageTablesConfig;
+import org.apache.druid.metadata.SqlSegmentsMetadataQuery;
+import org.apache.druid.metadata.TestDerbyConnector;
+import org.apache.druid.metadata.storage.derby.DerbyConnector;
+import org.apache.druid.segment.TestDataSource;
+import org.apache.druid.segment.TestHelper;
+import org.apache.druid.server.coordinator.CreateDataSegments;
+import org.apache.druid.timeline.DataSegment;
+import org.joda.time.DateTime;
+import org.joda.time.Interval;
+import org.openjdk.jmh.annotations.*;
+import org.openjdk.jmh.infra.Blackhole;
+
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.TimeUnit;
+import java.util.function.Function;
+
+@State(Scope.Benchmark)
+@Fork(value = 1)
+@Warmup(iterations = 1, time = 1)
+@Measurement(iterations = 20, time = 2)
+@BenchmarkMode({Mode.AverageTime})
+@OutputTimeUnit(TimeUnit.NANOSECONDS)
+public class SqlSegmentsMetadataQueryBenchmark {
+
+  private static final DateTime JAN_1 = DateTimes.of("2025-01-01");
+  private static final String V1 = JAN_1.toString();
+  private static final List<DataSegment> WIKI_SEGMENTS_2X5D

Review Comment:
   Also, for the purposes of the benchmark, we would ideally need more segments, say 100 days with 1000 segments per day. See the sketch below.
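   For example, using the same builder (the constant name here is just illustrative, following the partitions-x-days convention):
   ```java
   // 100 days x 1000 partitions per day = 100,000 segment records
   private static final List<DataSegment> WIKI_SEGMENTS_1000X100D
       = CreateDataSegments.ofDatasource(TestDataSource.WIKI)
                           .forIntervals(100, Granularities.DAY)
                           .withNumPartitions(1000)
                           .startingAt(JAN_1)
                           .withVersion(V1)
                           .eachOfSizeInMb(500);
   ```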



##########
benchmarks/src/test/java/org/apache/druid/benchmark/indexing/SqlSegmentsMetadataQueryBenchmark.java:
##########
@@ -0,0 +1,95 @@
+package org.apache.druid.benchmark.indexing;
+
+import com.google.common.collect.ImmutableSet;
+import org.apache.druid.java.util.common.DateTimes;
+import org.apache.druid.java.util.common.granularity.Granularities;
+import org.apache.druid.java.util.common.parsers.CloseableIterator;
+import org.apache.druid.metadata.IndexerSqlMetadataStorageCoordinatorTestBase;
+import org.apache.druid.metadata.MetadataStorageTablesConfig;
+import org.apache.druid.metadata.SqlSegmentsMetadataQuery;
+import org.apache.druid.metadata.TestDerbyConnector;
+import org.apache.druid.metadata.storage.derby.DerbyConnector;
+import org.apache.druid.segment.TestDataSource;
+import org.apache.druid.segment.TestHelper;
+import org.apache.druid.server.coordinator.CreateDataSegments;
+import org.apache.druid.timeline.DataSegment;
+import org.joda.time.DateTime;
+import org.joda.time.Interval;
+import org.openjdk.jmh.annotations.*;
+import org.openjdk.jmh.infra.Blackhole;
+
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.TimeUnit;
+import java.util.function.Function;
+
+@State(Scope.Benchmark)
+@Fork(value = 1)
+@Warmup(iterations = 1, time = 1)
+@Measurement(iterations = 20, time = 2)
+@BenchmarkMode({Mode.AverageTime})
+@OutputTimeUnit(TimeUnit.NANOSECONDS)
+public class SqlSegmentsMetadataQueryBenchmark {
+
+  private static final DateTime JAN_1 = DateTimes.of("2025-01-01");
+  private static final String V1 = JAN_1.toString();
+  private static final List<DataSegment> WIKI_SEGMENTS_2X5D
+          = CreateDataSegments.ofDatasource(TestDataSource.WIKI)
+          .forIntervals(10, Granularities.DAY)
+          .withNumPartitions(20)
+          .startingAt(JAN_1)
+          .withVersion(V1)
+          .eachOfSizeInMb(500);
+  @Param({

Review Comment:
   Please add a newline before this.



##########
server/src/test/java/org/apache/druid/metadata/TestDerbyConnector.java:
##########
@@ -178,6 +178,17 @@ public PendingSegmentsTable pendingSegments()
     {
       return new PendingSegmentsTable(this);
     }
+
+    public void beforeBenchmark()
+    {
+      before();
+    }
+
+    public void afterBenchmark()
+    {
+      this.getConnector().deleteAllSegmentRecords();
+      after();
+    }

Review Comment:
   We can avoid having these methods. Just make `before` and `after` public if needed, as in the sketch below.
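   A minimal illustration of the idea (the class name here is hypothetical): JUnit allows widening the visibility of `ExternalResource#before`/`#after` in an override, so a JMH benchmark can invoke them directly without wrapper methods.
   ```java
   import org.junit.rules.ExternalResource;

   public class DerbyRule extends ExternalResource
   {
     @Override
     public void before() throws Throwable  // previously protected
     {
       super.before();
       // existing connector setup would go here, unchanged
     }

     @Override
     public void after()  // previously protected
     {
       // existing connector teardown would go here, unchanged
       super.after();
     }
   }
   ```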



##########
benchmarks/src/test/java/org/apache/druid/benchmark/indexing/SqlSegmentsMetadataQueryBenchmark.java:
##########
@@ -0,0 +1,95 @@
+package org.apache.druid.benchmark.indexing;
+
+import com.google.common.collect.ImmutableSet;
+import org.apache.druid.java.util.common.DateTimes;
+import org.apache.druid.java.util.common.granularity.Granularities;
+import org.apache.druid.java.util.common.parsers.CloseableIterator;
+import org.apache.druid.metadata.IndexerSqlMetadataStorageCoordinatorTestBase;
+import org.apache.druid.metadata.MetadataStorageTablesConfig;
+import org.apache.druid.metadata.SqlSegmentsMetadataQuery;
+import org.apache.druid.metadata.TestDerbyConnector;
+import org.apache.druid.metadata.storage.derby.DerbyConnector;
+import org.apache.druid.segment.TestDataSource;
+import org.apache.druid.segment.TestHelper;
+import org.apache.druid.server.coordinator.CreateDataSegments;
+import org.apache.druid.timeline.DataSegment;
+import org.joda.time.DateTime;
+import org.joda.time.Interval;
+import org.openjdk.jmh.annotations.*;
+import org.openjdk.jmh.infra.Blackhole;
+
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.TimeUnit;
+import java.util.function.Function;
+
+@State(Scope.Benchmark)
+@Fork(value = 1)
+@Warmup(iterations = 1, time = 1)
+@Measurement(iterations = 20, time = 2)
+@BenchmarkMode({Mode.AverageTime})
+@OutputTimeUnit(TimeUnit.NANOSECONDS)
+public class SqlSegmentsMetadataQueryBenchmark {
+
+  private static final DateTime JAN_1 = DateTimes.of("2025-01-01");
+  private static final String V1 = JAN_1.toString();
+  private static final List<DataSegment> WIKI_SEGMENTS_2X5D

Review Comment:
   Given that there are 10 days' worth of segments with 20 partitions per day, the name should be `WIKI_SEGMENTS_20X10D`.



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: [email protected]

For queries about this service, please contact Infrastructure at:
[email protected]

