clintropolis commented on a change in pull request #8578: parallel broker merges on fork join pool
URL: https://github.com/apache/incubator-druid/pull/8578#discussion_r335410227
 
 

 ##########
 File path: core/src/main/java/org/apache/druid/java/util/common/guava/ParallelMergeCombiningSequence.java
 ##########
 @@ -0,0 +1,1071 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.druid.java.util.common.guava;
+
+import com.google.common.collect.Lists;
+import com.google.common.collect.Ordering;
+import org.apache.druid.java.util.common.RE;
+import org.apache.druid.java.util.common.logger.Logger;
+import org.apache.druid.utils.JvmUtils;
+
+import javax.annotation.Nullable;
+import java.io.IOException;
+import java.util.ArrayDeque;
+import java.util.ArrayList;
+import java.util.Iterator;
+import java.util.List;
+import java.util.NoSuchElementException;
+import java.util.Objects;
+import java.util.PriorityQueue;
+import java.util.Queue;
+import java.util.concurrent.ArrayBlockingQueue;
+import java.util.concurrent.BlockingQueue;
+import java.util.concurrent.ForkJoinPool;
+import java.util.concurrent.RecursiveAction;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.TimeoutException;
+import java.util.concurrent.atomic.AtomicReference;
+import java.util.function.BinaryOperator;
+
+/**
+ * Artisanal, locally-sourced, hand-crafted, gluten and GMO free, bespoke, small-batch parallel merge combining sequence
+ */
+public class ParallelMergeCombiningSequence<T> extends YieldingSequenceBase<T>
+{
+  private static final Logger LOG = new Logger(ParallelMergeCombiningSequence.class);
+
+  private final ForkJoinPool workerPool;
+  private final List<Sequence<T>> baseSequences;
+  private final Ordering<T> orderingFn;
+  private final BinaryOperator<T> combineFn;
+  private final int queueSize;
+  private final boolean hasTimeout;
+  private final long timeoutAtNanos;
+  private final int queryPriority; // not currently used :(
+  private final int yieldAfter;
+  private final int batchSize;
+  private final int parallelism;
+  private final CancellationGizmo cancellationGizmo;
+
+  public ParallelMergeCombiningSequence(
+      ForkJoinPool workerPool,
+      List<Sequence<T>> baseSequences,
+      Ordering<T> orderingFn,
+      BinaryOperator<T> combineFn,
+      boolean hasTimeout,
+      long timeoutMillis,
+      int queryPriority,
+      int parallelism,
+      int yieldAfter,
+      int batchSize
+  )
+  {
+    this.workerPool = workerPool;
+    this.baseSequences = baseSequences;
+    this.orderingFn = orderingFn;
+    this.combineFn = combineFn;
+    this.hasTimeout = hasTimeout;
+    this.timeoutAtNanos = System.nanoTime() + TimeUnit.NANOSECONDS.convert(timeoutMillis, TimeUnit.MILLISECONDS);
+    this.queryPriority = queryPriority;
+    this.parallelism = parallelism;
+    this.yieldAfter = yieldAfter;
+    this.batchSize = batchSize;
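+    // size queues to hold up to 4 task yields worth of result batches so merge tasks can run ahead of the consumer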
+    this.queueSize = 4 * (yieldAfter / batchSize);
+    this.cancellationGizmo = new CancellationGizmo();
+  }
+
+  @Override
+  public <OutType> Yielder<OutType> toYielder(OutType initValue, YieldingAccumulator<OutType, T> accumulator)
+  {
+    if (baseSequences.isEmpty()) {
+      return Sequences.<T>empty().toYielder(initValue, accumulator);
+    }
+
+    final BlockingQueue<ResultBatch<T>> outputQueue = new ArrayBlockingQueue<>(queueSize);
+    MergeCombinePartitioningAction<T> finalMergeAction = new MergeCombinePartitioningAction<>(
+        baseSequences,
+        orderingFn,
+        combineFn,
+        outputQueue,
+        queueSize,
+        parallelism,
+        yieldAfter,
+        batchSize,
+        hasTimeout,
+        timeoutAtNanos,
+        cancellationGizmo
+    );
+    workerPool.execute(finalMergeAction);
+    Sequence<T> finalOutSequence = makeOutputSequenceForQueue(outputQueue, hasTimeout, timeoutAtNanos, cancellationGizmo);
+    return finalOutSequence.toYielder(initValue, accumulator);
+  }
+
+  /**
+   * Create an output {@link Sequence} that wraps the output {@link BlockingQueue} of a
+   * {@link MergeCombinePartitioningAction}
+   */
+  static <T> Sequence<T> makeOutputSequenceForQueue(
+      BlockingQueue<ResultBatch<T>> queue,
+      boolean hasTimeout,
+      long timeoutAtNanos,
+      CancellationGizmo cancellationGizmo
+  )
+  {
+    return new BaseSequence<>(
+        new BaseSequence.IteratorMaker<T, Iterator<T>>()
+        {
+          @Override
+          public Iterator<T> make()
+          {
+            return new Iterator<T>()
+            {
+              private ResultBatch<T> currentBatch;
+
+              @Override
+              public boolean hasNext()
+              {
+                final long thisTimeoutNanos = timeoutAtNanos - System.nanoTime();
+                if (thisTimeoutNanos < 0) {
+                  throw new RE(new TimeoutException("Sequence iterator timed out"));
+                }
+
+                if (currentBatch != null && !currentBatch.isTerminalResult() && !currentBatch.isDrained()) {
+                  return true;
+                }
+                try {
+                  if (currentBatch == null || currentBatch.isDrained()) {
+                    if (hasTimeout) {
+                      currentBatch = queue.poll(thisTimeoutNanos, TimeUnit.NANOSECONDS);
+                    } else {
+                      currentBatch = queue.take();
+                    }
+                  }
+                  if (currentBatch == null) {
+                    throw new RE(new TimeoutException("Sequence iterator timed out waiting for data"));
+                  }
+
+                  if (cancellationGizmo.isCancelled()) {
+                    throw cancellationGizmo.getRuntimeException();
+                  }
+
+                  if (currentBatch.isTerminalResult()) {
+                    return false;
+                  }
+                  return true;
+                }
+                catch (InterruptedException e) {
+                  throw new RE(e);
+                }
+              }
+
+              @Override
+              public T next()
+              {
+                if (cancellationGizmo.isCancelled()) {
+                  throw cancellationGizmo.getRuntimeException();
+                }
+
+                if (currentBatch == null || currentBatch.isDrained() || currentBatch.isTerminalResult()) {
+                  throw new NoSuchElementException();
+                }
+                return currentBatch.next();
+              }
+            };
+          }
+
+          @Override
+          public void cleanup(Iterator<T> iterFromMake)
+          {
+            // nothing to cleanup
+          }
+        }
+    );
+  }
+
+  /**
+   * This {@link RecursiveAction} is the initial task of the parallel merge-combine process. Capacity and input
+   * sequence count permitting, it will partition the input set of {@link Sequence} to do a 2-layer parallel merge.
+   *
+   * For the first layer, the partitions of input sequences are each wrapped in {@link YielderBatchedResultsCursor},
+   * and for each partition a {@link PrepareMergeCombineInputsAction} will be executed to wait for each of the
+   * yielders to yield {@link ResultBatch}. After the cursors all have an initial set of results, the
+   * {@link PrepareMergeCombineInputsAction} will execute a {@link MergeCombineAction} to perform the actual work of
+   * merging sequences and combining results. The merged and combined output of each partition will itself be put
+   * into {@link ResultBatch} and pushed to a {@link BlockingQueue} with a {@link ForkJoinPool} {@link QueuePusher}.
+   *
+   * The second layer will execute a single {@link PrepareMergeCombineInputsAction} to wait for the
+   * {@link ResultBatch} from each partition to be available in its 'output' {@link BlockingQueue}, each of which is
+   * wrapped in a {@link BlockingQueueuBatchedResultsCursor}. Like the first layer, after the
+   * {@link PrepareMergeCombineInputsAction} is complete and some {@link ResultBatch} are ready to merge from each
+   * partition, it will execute a {@link MergeCombineAction} to do a final merge-combine of all the parallel computed
+   * results, again pushing {@link ResultBatch} into a {@link BlockingQueue} with a {@link QueuePusher}.
+   */
+  private static class MergeCombinePartitioningAction<T> extends RecursiveAction
+  {
+    private final List<Sequence<T>> sequences;
+    private final Ordering<T> orderingFn;
+    private final BinaryOperator<T> combineFn;
+    private final BlockingQueue<ResultBatch<T>> out;
+    private final int queueSize;
+    private final int parallelism;
+    private final int yieldAfter;
+    private final int batchSize;
+    private final boolean hasTimeout;
+    private final long timeoutAt;
+    private final CancellationGizmo cancellationGizmo;
+
+    private MergeCombinePartitioningAction(
+        List<Sequence<T>> sequences,
+        Ordering<T> orderingFn,
+        BinaryOperator<T> combineFn,
+        BlockingQueue<ResultBatch<T>> out,
+        int queueSize,
+        int parallelism,
+        int yieldAfter,
+        int batchSize,
+        boolean hasTimeout,
+        long timeoutAt,
+        CancellationGizmo cancellationGizmo
+    )
+    {
+      this.sequences = sequences;
+      this.combineFn = combineFn;
+      this.orderingFn = orderingFn;
+      this.out = out;
+      this.queueSize = queueSize;
+      this.parallelism = parallelism;
+      this.yieldAfter = yieldAfter;
+      this.batchSize = batchSize;
+      this.hasTimeout = hasTimeout;
+      this.timeoutAt = timeoutAt;
+      this.cancellationGizmo = cancellationGizmo;
+    }
+
+    @Override
+    protected void compute()
+    {
+      try {
+        final int parallelTaskCount = computeNumTasks();
+
+        // if we have a small number of sequences to merge, or computed parallelism is too low, do not run in parallel,
+        // just serially perform the merge-combine with a single task
+        if (sequences.size() < 4 || parallelTaskCount < 2) {
+          LOG.debug(
+              "Input sequence count (%s) or available parallel merge task count (%s) too small to perform parallel"
+              + " merge-combine, performing serially with a single merge-combine task",
+              sequences.size(),
+              parallelTaskCount
+          );
+
+          QueuePusher<ResultBatch<T>> resultsPusher = new QueuePusher<>(out, hasTimeout, timeoutAt);
+
+          List<BatchedResultsCursor<T>> sequenceCursors = new ArrayList<>(sequences.size());
+          for (Sequence<T> s : sequences) {
+            sequenceCursors.add(new YielderBatchedResultsCursor<>(new SequenceBatcher<>(s, batchSize), orderingFn));
+          }
+          PrepareMergeCombineInputsAction<T> blockForInputsAction = new PrepareMergeCombineInputsAction<>(
+              sequenceCursors,
+              resultsPusher,
+              orderingFn,
+              combineFn,
+              yieldAfter,
+              batchSize,
+              cancellationGizmo
+          );
+          getPool().execute(blockForInputsAction);
+        } else {
+          // 2 layer parallel merge done in fjp
+          LOG.debug("Spawning %s parallel merge-combine tasks for %s 
sequences", parallelTaskCount, sequences.size());
+          spawnParallelTasks(parallelTaskCount);
+        }
+      }
+      catch (Exception ex) {
+        cancellationGizmo.cancel(ex);
+        out.offer(ResultBatch.TERMINAL);
+      }
+    }
+
+    private void spawnParallelTasks(int parallelMergeTasks)
+    {
+      List<RecursiveAction> tasks = new ArrayList<>();
+      List<BlockingQueue<ResultBatch<T>>> intermediaryOutputs = new ArrayList<>(parallelMergeTasks);
+
+      List<? extends List<Sequence<T>>> partitions =
+          Lists.partition(sequences, sequences.size() / parallelMergeTasks);
+
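+      // layer 1: one merge-combine task per partition of input sequences, each pushing result batches to its own intermediary output queue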
+      for (List<Sequence<T>> partition : partitions) {
+        BlockingQueue<ResultBatch<T>> outputQueue = new ArrayBlockingQueue<>(queueSize);
+        intermediaryOutputs.add(outputQueue);
+        QueuePusher<ResultBatch<T>> pusher = new QueuePusher<>(outputQueue, hasTimeout, timeoutAt);
+
+        List<BatchedResultsCursor<T>> partitionCursors = new ArrayList<>(sequences.size());
+        for (Sequence<T> s : partition) {
+          partitionCursors.add(new YielderBatchedResultsCursor<>(new SequenceBatcher<>(s, batchSize), orderingFn));
+        }
+        PrepareMergeCombineInputsAction<T> blockForInputsAction = new PrepareMergeCombineInputsAction<>(
+            partitionCursors,
+            pusher,
+            orderingFn,
+            combineFn,
+            yieldAfter,
+            batchSize,
+            cancellationGizmo
+        );
+        tasks.add(blockForInputsAction);
+      }
+
+      for (RecursiveAction task : tasks) {
+        getPool().execute(task);
+      }
+
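+      // layer 2: a single task merges the intermediary output queues of the layer 1 partitions into the final output queue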
+      QueuePusher<ResultBatch<T>> outputPusher = new QueuePusher<>(out, hasTimeout, timeoutAt);
+      List<BatchedResultsCursor<T>> intermediaryOutputsCursors = new ArrayList<>(intermediaryOutputs.size());
+      for (BlockingQueue<ResultBatch<T>> queue : intermediaryOutputs) {
+        intermediaryOutputsCursors.add(
+            new BlockingQueueuBatchedResultsCursor<>(queue, orderingFn, hasTimeout, timeoutAt)
+        );
+      }
+      PrepareMergeCombineInputsAction<T> finalMergeAction = new PrepareMergeCombineInputsAction<>(
+          intermediaryOutputsCursors,
+          outputPusher,
+          orderingFn,
+          combineFn,
+          yieldAfter,
+          batchSize,
+          cancellationGizmo
+      );
+
+      getPool().execute(finalMergeAction);
+    }
+
+    /**
+     * Computes the maximum number of layer 1 parallel merging tasks given available processors and an estimate of
+     * current {@link ForkJoinPool} utilization. A return value of 1 or less indicates that a serial merge will be
+     * done on the pool instead.
+     */
+    private int computeNumTasks()
+    {
+      final int availableProcessors = JvmUtils.getRuntimeInfo().getAvailableProcessors();
+      final int runningThreadCount = getPool().getRunningThreadCount();
+      final int submissionCount = getPool().getQueuedSubmissionCount();
+      // max is minimum of either number of processors or user suggested parallelism
+      final int maxParallelism = Math.min(availableProcessors, parallelism);
+      // adjust max to be no more than total pool parallelism less the number of running threads + submitted tasks
+      final int utilizationEstimate = runningThreadCount + submissionCount;
+      // minimum of 'max computed parallelism' and pool parallelism less current 'utilization estimate'
+      final int computedParallelism = Math.min(maxParallelism, getPool().getParallelism() - utilizationEstimate);
+      // compute total number of layer 1 'parallel' tasks, the final merge task will take the remaining slot
+      // we divide the sequences by 2 because we need at least 2 sequences per partition for it to make sense to need
+      // an additional parallel task to compute the merge
+      final int computedOptimalParallelism = Math.min(
+          (int) Math.floor((double) sequences.size() / 2.0),
+          computedParallelism - 1
+      );
+
+      final int computedNumParallelTasks = Math.max(computedOptimalParallelism, 1);
+
+      LOG.debug("Computed parallel tasks: [%s]; ForkJoinPool details - 
processors: [%s] parallelism: [%s] "
+                + "active threads: [%s] running threads: [%s] queued 
submissions: [%s] queued tasks: [%s] "
+                + "pool size: [%s] steal count: [%s]",
+                computedNumParallelTasks,
+                availableProcessors,
+                parallelism,
+                getPool().getActiveThreadCount(),
+                runningThreadCount,
+                submissionCount,
+                getPool().getQueuedTaskCount(),
+                getPool().getPoolSize(),
+                getPool().getStealCount()
+      );
+
+      return computedNumParallelTasks;
+    }
+  }
+
+
+  /**
+   * This {@link RecursiveAction} is the work-horse of the {@link ParallelMergeCombiningSequence}; it merge-combines
+   * a set of {@link BatchedResultsCursor} and produces output to a {@link BlockingQueue} with the help of a
+   * {@link QueuePusher}. This is essentially a composite of logic taken from {@link MergeSequence} and
+   * {@link org.apache.druid.common.guava.CombiningSequence}, where the {@link Ordering} is used both to set the sort
+   * order for a {@link PriorityQueue}, and as a comparison to determine if 'same' ordered results need to be
+   * combined with a supplied {@link BinaryOperator} combining function.
+   *
+   * This task takes a {@link #yieldAfter} parameter which controls how many input result rows will be processed
+   * before this task completes and executes a new task to continue where it left off. This value is initially set by
+   * the {@link MergeCombinePartitioningAction} to a default value, but after that this process is timed to try to
+   * compute an 'optimal' number of rows to yield to achieve a task runtime of ~10ms, on the assumption that the time
+   * to process n results will be approximately the same. {@link #recursionDepth} is used to track how many times a
+   * task has continued executing, and is utilized to compute a cumulative moving average of task run time per amount
+   * yielded in order to 'smooth' out the continual adjustment.
+   */
+  private static class MergeCombineAction<T> extends RecursiveAction
+  {
+    private final PriorityQueue<BatchedResultsCursor<T>> pQueue;
+    private final Ordering<T> orderingFn;
+    private final BinaryOperator<T> combineFn;
+    private final QueuePusher<ResultBatch<T>> outputQueue;
+    private final T initialValue;
+    private final int yieldAfter;
+    private final int batchSize;
+    private final int recursionDepth;
+    private final CancellationGizmo cancellationGizmo;
+
+    private MergeCombineAction(
+        PriorityQueue<BatchedResultsCursor<T>> pQueue,
+        QueuePusher<ResultBatch<T>> outputQueue,
+        Ordering<T> orderingFn,
+        BinaryOperator<T> combineFn,
+        T initialValue,
+        int yieldAfter,
+        int batchSize,
+        int recursionDepth,
+        CancellationGizmo cancellationGizmo
+    )
+    {
+      this.pQueue = pQueue;
+      this.orderingFn = orderingFn;
+      this.combineFn = combineFn;
+      this.outputQueue = outputQueue;
+      this.initialValue = initialValue;
+      this.yieldAfter = yieldAfter;
+      this.batchSize = batchSize;
+      this.recursionDepth = recursionDepth;
+      this.cancellationGizmo = cancellationGizmo;
+    }
+
+    @Override
+    protected void compute()
+    {
+      try {
+        long start = System.nanoTime();
+
+        int counter = 0;
+        int batchCounter = 0;
+        ResultBatch<T> outputBatch = new ResultBatch<>(batchSize);
+
+        T currentCombinedValue = initialValue;
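+        // merge-combine up to 'yieldAfter' results from the priority queue of cursors before this task yields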
+        while (counter++ < yieldAfter && !pQueue.isEmpty()) {
+          BatchedResultsCursor<T> cursor = pQueue.poll();
+
+          // push the queue along
+          if (!cursor.isDone()) {
+            T nextValueToAccumulate = cursor.get();
+
+            cursor.advance();
+            if (!cursor.isDone()) {
+              pQueue.offer(cursor);
+            } else {
+              cursor.close();
+            }
+
+            // if current value is null, combine null with next value
+            if (currentCombinedValue == null) {
+              currentCombinedValue = combineFn.apply(null, nextValueToAccumulate);
+              continue;
+            }
+
+            // if current value is "same" as next value, combine them
+            if (orderingFn.compare(currentCombinedValue, nextValueToAccumulate) == 0) {
+              currentCombinedValue = combineFn.apply(currentCombinedValue, nextValueToAccumulate);
+              continue;
+            }
+
+            // else, push accumulated value to the queue, accumulate again with next value as initial
+            outputBatch.add(currentCombinedValue);
+            batchCounter++;
+            if (batchCounter >= batchSize) {
+              outputQueue.offer(outputBatch);
+              outputBatch = new ResultBatch<>(batchSize);
+              batchCounter = 0;
+            }
+
+            // next value is now current value
+            currentCombinedValue = combineFn.apply(null, nextValueToAccumulate);
+          } else {
+            cursor.close();
+          }
+        }
+
+        if (!pQueue.isEmpty() && !cancellationGizmo.isCancelled()) {
+          // if there is still work to be done, execute a new task with the current accumulated value to continue
+          // combining where we left off
+          if (!outputBatch.isDrained()) {
+            outputQueue.offer(outputBatch);
+          }
+
+          // measure the time it took to process 'yieldAfter' elements in order to project a next 'yieldAfter' value
+          // which we want to target a 10ms task run time. smooth this value with a cumulative moving average in order
+          // to prevent normal jitter in processing time from skewing the next yield value too far in any direction
+          final long elapsedMillis = Math.max(
+              TimeUnit.MILLISECONDS.convert(System.nanoTime() - start, TimeUnit.NANOSECONDS),
+              1L
+          );
+          final double nextYieldAfter = Math.max(10.0 * ((double) yieldAfter / elapsedMillis), 1.0);
+          final double cumulativeMovingAverage = (nextYieldAfter + (recursionDepth * yieldAfter)) / (recursionDepth + 1);
+          final int adjustedNextYieldAfter = (int) Math.ceil(cumulativeMovingAverage);
 
 Review comment:
   Thanks for digging in on this @himanshug and making me take a much closer look at this area in particular, since I think it really is one of the most important parts for ensuring that the pool operates well when it is heavily saturated. I've been working on a benchmark that directly tests `ParallelMergeCombiningSequence` in order to try out the differences between 10ms and 100ms, using wall time vs cpu time, behavior when overloaded, etc.
   
   While I haven't quite got to that yet, in the process I discovered a mistake in computing the level of parallelism that heavily impacted performance when the pool was overloaded: https://github.com/apache/incubator-druid/pull/8578/files#diff-84792f9d3cefe47cbb471669dce2a276R374, specifically that it was using the pool parallelism instead of the processor count, which meant it was overestimating its capacity for work. These benchmarks are not pushed yet, and will probably change a lot as I clean them up, but the rough explanation is:
   
   * `testJustOne` uses `ParallelMergeCombiningSequence` and a single jmh thread on an empty fork join pool
   * `testManyParallel` does 24 jmh threads with `ParallelMergeCombiningSequence` all being evaluated at the same time (this was my suspected worst case for the greedy parallelism chooser)
   * `testManyParallelJitter` does 24 jmh threads with a random 0-50ms delay before starting work on the `ParallelMergeCombiningSequence` to give a more realistic load pattern where not everything starts at exactly the same time
   * `testSerial` uses `CombiningSequence` wrapping a `MergeSequence` to do the equivalent operation to `ParallelMergeCombiningSequence`
   * `testSerialParallel` is 24 threads of `testSerial`
   * `testSerialParallelJitter` is `testSerialParallel` with 0-50ms of random jitter
   
   Benchmarks were run twice, both with and without synchronized benchmark iterations: synchronized to show the performance of the exact scenario described for each benchmark above, and unsynchronized to let each thread run its iterations at its leisure, which more closely emulates an actual sustained heavy load pattern.
   
   ```
   pool parallelism - running + queued
   (no sync)
   Benchmark                                                         (batchSize)  (numSequences)  (parallelism)  (rowsPerSequence)  (targetTaskTimeMillis)  (yieldAfter)  Mode  Cnt     Score     Error  Units
   ParallelMergeCombiningSequenceBenchmark.testJustOne                       128               8              4              75000                      10          1024  avgt   20    60.393 ±   4.754  ms/op
   ParallelMergeCombiningSequenceBenchmark.testManyParallel                  128               8              4              75000                      10          1024  avgt   20  1389.137 ± 904.866  ms/op
   ParallelMergeCombiningSequenceBenchmark.testManyParallelJitter            128               8              4              75000                      10          1024  avgt   20   647.165 ± 168.406  ms/op
   ParallelMergeCombiningSequenceBenchmark.testSerial                        128               8              4              75000                      10          1024  avgt   20    75.212 ±   0.600  ms/op
   ParallelMergeCombiningSequenceBenchmark.testSerialParallel                128               8              4              75000                      10          1024  avgt   20   509.219 ±   9.345  ms/op
   ParallelMergeCombiningSequenceBenchmark.testSerialParallelJitter          128               8              4              75000                      10          1024  avgt   20   505.975 ±  12.986  ms/op

   (sync)
   Benchmark                                                         (batchSize)  (numSequences)  (parallelism)  (rowsPerSequence)  (targetTaskTimeMillis)  (yieldAfter)  Mode  Cnt     Score      Error  Units
   ParallelMergeCombiningSequenceBenchmark.testJustOne                       128               8              4              75000                      10          1024  avgt   20    61.379 ±    5.957  ms/op
   ParallelMergeCombiningSequenceBenchmark.testManyParallel                  128               8              4              75000                      10          1024  avgt   20  1883.554 ± 1614.321  ms/op
   ParallelMergeCombiningSequenceBenchmark.testManyParallelJitter            128               8              4              75000                      10          1024  avgt   20   540.248 ±   17.194  ms/op
   ParallelMergeCombiningSequenceBenchmark.testSerial                        128               8              4              75000                      10          1024  avgt   20    76.956 ±    0.768  ms/op
   ParallelMergeCombiningSequenceBenchmark.testSerialParallel                128               8              4              75000                      10          1024  avgt   20   515.362 ±   14.206  ms/op
   ParallelMergeCombiningSequenceBenchmark.testSerialParallelJitter          128               8              4              75000                      10          1024  avgt   20   507.267 ±   10.427  ms/op
   ```
   
   after the change to `final int computedParallelism = maxParallelism - utilizationEstimate`:
   
   ```
   num cores - (running + queue)
   (no sync)
   Benchmark                                                         (batchSize)  (numSequences)  (parallelism)  (rowsPerSequence)  (targetTaskTimeMillis)  (yieldAfter)  Mode  Cnt    Score    Error  Units
   ParallelMergeCombiningSequenceBenchmark.testJustOne                       128               8              4              75000                      10          1024  avgt   20   49.720 ±  1.188  ms/op
   ParallelMergeCombiningSequenceBenchmark.testManyParallel                  128               8              4              75000                      10          1024  avgt   20  533.131 ± 14.251  ms/op
   ParallelMergeCombiningSequenceBenchmark.testManyParallelJitter            128               8              4              75000                      10          1024  avgt   20  539.346 ± 13.348  ms/op
   ParallelMergeCombiningSequenceBenchmark.testSerial                        128               8              4              75000                      10          1024  avgt   20   75.876 ±  0.579  ms/op
   ParallelMergeCombiningSequenceBenchmark.testSerialParallel                128               8              4              75000                      10          1024  avgt   20  500.047 ± 12.856  ms/op
   ParallelMergeCombiningSequenceBenchmark.testSerialParallelJitter          128               8              4              75000                      10          1024  avgt   20  517.185 ± 11.089  ms/op

   (sync)
   Benchmark                                                         (batchSize)  (numSequences)  (parallelism)  (rowsPerSequence)  (targetTaskTimeMillis)  (yieldAfter)  Mode  Cnt    Score    Error  Units
   ParallelMergeCombiningSequenceBenchmark.testJustOne                       128               8              4              75000                      10          1024  avgt   20   49.861 ±  1.088  ms/op
   ParallelMergeCombiningSequenceBenchmark.testManyParallel                  128               8              4              75000                      10          1024  avgt   20  522.590 ± 16.520  ms/op
   ParallelMergeCombiningSequenceBenchmark.testManyParallelJitter            128               8              4              75000                      10          1024  avgt   20  530.802 ± 18.761  ms/op
   ParallelMergeCombiningSequenceBenchmark.testSerial                        128               8              4              75000                      10          1024  avgt   20   75.877 ±  0.882  ms/op
   ParallelMergeCombiningSequenceBenchmark.testSerialParallel                128               8              4              75000                      10          1024  avgt   20  517.718 ± 12.978  ms/op
   ParallelMergeCombiningSequenceBenchmark.testSerialParallelJitter          128               8              4              75000                      10          1024  avgt   20  520.770 ±  9.040  ms/op
   ```
   
   After the modification, things look a lot better. I'm going to clean up the benchmarks and probably split out the single-threaded reference benchmarks (a `CombiningSequence` wrapping a `MergeSequence`) so they don't get run with parameters that don't apply. That will let me dig deeper into the tuning questions around target task run time and how to measure it, batch size, as well as some additional variations like sequences with occasional blocking periods and such.
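   
   For clarity, here is a rough sketch of the shape of the change (illustrative only, not the actual patch; the helper method names are hypothetical and the variables just mirror `computeNumTasks()` above, assuming a plain `java.util.concurrent.ForkJoinPool`):
   
   ```java
   import java.util.concurrent.ForkJoinPool;
   
   class ParallelismEstimateSketch
   {
     // before: remaining capacity estimated against the pool's configured parallelism,
     // which overestimates available slots once the pool is saturated with other merges
     static int computedParallelismBefore(ForkJoinPool pool, int availableProcessors, int suggestedParallelism)
     {
       final int maxParallelism = Math.min(availableProcessors, suggestedParallelism);
       final int utilizationEstimate = pool.getRunningThreadCount() + pool.getQueuedSubmissionCount();
       return Math.min(maxParallelism, pool.getParallelism() - utilizationEstimate);
     }
   
     // after: remaining capacity estimated against the processor-bounded maximum instead
     static int computedParallelismAfter(ForkJoinPool pool, int availableProcessors, int suggestedParallelism)
     {
       final int maxParallelism = Math.min(availableProcessors, suggestedParallelism);
       final int utilizationEstimate = pool.getRunningThreadCount() + pool.getQueuedSubmissionCount();
       return maxParallelism - utilizationEstimate;
     }
   }
   ```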

----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


With regards,
Apache Git Services

---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscr...@druid.apache.org
For additional commands, e-mail: commits-h...@druid.apache.org
