This is an automated email from the ASF dual-hosted git repository.

zhuzh pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/flink.git

commit 7e42493b021e3e012dd429e89588a54d8b151dfd
Author: sunxia <xingbe...@gmail.com>
AuthorDate: Thu Jun 15 18:12:45 2023 +0800

    [hotfix] Enrich hybridPartitionDataConsumeConstraint when creating AdaptiveBatchScheduler in scheduler benchmark utils.
---
 .../flink/runtime/scheduler/benchmark/SchedulerBenchmarkUtils.java      | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/flink-runtime/src/test/java/org/apache/flink/runtime/scheduler/benchmark/SchedulerBenchmarkUtils.java b/flink-runtime/src/test/java/org/apache/flink/runtime/scheduler/benchmark/SchedulerBenchmarkUtils.java
index 2368e781ec4..a60b3fd584b 100644
--- a/flink-runtime/src/test/java/org/apache/flink/runtime/scheduler/benchmark/SchedulerBenchmarkUtils.java
+++ b/flink-runtime/src/test/java/org/apache/flink/runtime/scheduler/benchmark/SchedulerBenchmarkUtils.java
@@ -135,6 +135,8 @@ public class SchedulerBenchmarkUtils {
         return schedulerBuilder
                 .setVertexParallelismAndInputInfosDecider(
                         createCustomParallelismDecider(jobConfiguration.getParallelism()))
+                .setHybridPartitionDataConsumeConstraint(
+                        jobConfiguration.getHybridPartitionDataConsumeConstraint())
                 .setInputConsumableDeciderFactory(
                         loadInputConsumableDeciderFactory(
                                 jobConfiguration.getHybridPartitionDataConsumeConstraint()))
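
For context, the sketch below illustrates the builder pattern this fix touches: the constraint read from the job configuration is forwarded to the scheduler builder instead of being left at the builder's default. The enum values, JobConfiguration holder, and SchedulerBuilder here are simplified, hypothetical stand-ins for illustration only, not the actual Flink classes or API.

    // Illustrative sketch only; names and shapes are assumptions, not the Flink API.
    public class SchedulerBuilderSketch {

        // Simplified stand-in for the constraint enum used by the benchmark utils.
        enum HybridPartitionDataConsumeConstraint {
            ALL_PRODUCERS_FINISHED,
            ONLY_FINISHED_PRODUCERS,
            UNFINISHED_PRODUCERS
        }

        // Hypothetical configuration holder, mirroring jobConfiguration in the diff.
        static class JobConfiguration {
            private final HybridPartitionDataConsumeConstraint constraint;

            JobConfiguration(HybridPartitionDataConsumeConstraint constraint) {
                this.constraint = constraint;
            }

            HybridPartitionDataConsumeConstraint getHybridPartitionDataConsumeConstraint() {
                return constraint;
            }
        }

        // Hypothetical fluent builder, mirroring schedulerBuilder in the diff.
        static class SchedulerBuilder {
            private HybridPartitionDataConsumeConstraint constraint =
                    HybridPartitionDataConsumeConstraint.UNFINISHED_PRODUCERS;

            SchedulerBuilder setHybridPartitionDataConsumeConstraint(
                    HybridPartitionDataConsumeConstraint constraint) {
                this.constraint = constraint;
                return this;
            }

            void build() {
                // Without the added setter call, the benchmark scheduler would always
                // be built with the builder's default constraint, ignoring the config.
                System.out.println("Building scheduler with constraint: " + constraint);
            }
        }

        public static void main(String[] args) {
            JobConfiguration jobConfiguration =
                    new JobConfiguration(
                            HybridPartitionDataConsumeConstraint.ONLY_FINISHED_PRODUCERS);

            new SchedulerBuilder()
                    // The added call from this commit: propagate the configured constraint.
                    .setHybridPartitionDataConsumeConstraint(
                            jobConfiguration.getHybridPartitionDataConsumeConstraint())
                    .build();
        }
    }

The actual change is the same two-line fluent call on the existing builder, so the benchmark now exercises the scheduler under whichever consume constraint the benchmark's job configuration specifies.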
