This is an automated email from the ASF dual-hosted git repository. vanzin pushed a commit to branch branch-2.4 in repository https://gitbox.apache.org/repos/asf/spark.git
The following commit(s) were added to refs/heads/branch-2.4 by this push: new 9ae7393 [SPARK-29263][CORE][TEST][FOLLOWUP][2.4] Fix build failure of `TaskSchedulerImplSuite` 9ae7393 is described below commit 9ae73932bb749bde6b71cbe6cf595ec2d23b60ea Author: Xingbo Jiang <xingbo.ji...@databricks.com> AuthorDate: Fri Sep 27 16:31:23 2019 -0700 [SPARK-29263][CORE][TEST][FOLLOWUP][2.4] Fix build failure of `TaskSchedulerImplSuite` ### What changes were proposed in this pull request? https://github.com/apache/spark/pull/25946 fixed a bug and modified the `TaskSchedulerImplSuite`; when backported to 2.4, it broke the build. This PR is to fix the broken test build. ### How was this patch tested? Passed locally. Closes #25952 from jiangxb1987/SPARK-29263. Authored-by: Xingbo Jiang <xingbo.ji...@databricks.com> Signed-off-by: Marcelo Vanzin <van...@cloudera.com> --- .../org/apache/spark/scheduler/TaskSchedulerImplSuite.scala | 10 +++++++--- .../scala/org/apache/spark/scheduler/TaskSetManagerSuite.scala | 2 +- 2 files changed, 8 insertions(+), 4 deletions(-) diff --git a/core/src/test/scala/org/apache/spark/scheduler/TaskSchedulerImplSuite.scala b/core/src/test/scala/org/apache/spark/scheduler/TaskSchedulerImplSuite.scala index 5c0601eb03..ecbb6ab 100644 --- a/core/src/test/scala/org/apache/spark/scheduler/TaskSchedulerImplSuite.scala +++ b/core/src/test/scala/org/apache/spark/scheduler/TaskSchedulerImplSuite.scala @@ -77,7 +77,11 @@ class TaskSchedulerImplSuite extends SparkFunSuite with LocalSparkContext with B } def setupScheduler(confs: (String, String)*): TaskSchedulerImpl = { - val conf = new SparkConf().setMaster("local").setAppName("TaskSchedulerImplSuite") + setupSchedulerWithMaster("local", confs: _*) + } + + def setupSchedulerWithMaster(master: String, confs: (String, String)*): TaskSchedulerImpl = { + val conf = new SparkConf().setMaster(master).setAppName("TaskSchedulerImplSuite") confs.foreach { case (k, v) => conf.set(k, v) } sc = new SparkContext(conf) 
taskScheduler = new TaskSchedulerImpl(sc) @@ -1129,7 +1133,7 @@ class TaskSchedulerImplSuite extends SparkFunSuite with LocalSparkContext with B // you'd need the previous stage to also get restarted, and then succeed, in between each // attempt, but that happens outside what we're mocking here.) val zombieAttempts = (0 until 2).map { stageAttempt => - val attempt = FakeTask.createTaskSet(10, stageAttemptId = stageAttempt) + val attempt = FakeTask.createTaskSet(10, stageId = 0, stageAttemptId = stageAttempt) taskScheduler.submitTasks(attempt) val tsm = taskScheduler.taskSetManagerForAttempt(0, stageAttempt).get val offers = (0 until 10).map{ idx => WorkerOffer(s"exec-$idx", s"host-$idx", 1) } @@ -1148,7 +1152,7 @@ class TaskSchedulerImplSuite extends SparkFunSuite with LocalSparkContext with B // we've now got 2 zombie attempts, each with 9 tasks still active. Submit the 3rd attempt for // the stage, but this time with insufficient resources so not all tasks are active. - val finalAttempt = FakeTask.createTaskSet(10, stageAttemptId = 2) + val finalAttempt = FakeTask.createTaskSet(10, stageId = 0, stageAttemptId = 2) taskScheduler.submitTasks(finalAttempt) val finalTsm = taskScheduler.taskSetManagerForAttempt(0, 2).get val offers = (0 until 5).map{ idx => WorkerOffer(s"exec-$idx", s"host-$idx", 1) } diff --git a/core/src/test/scala/org/apache/spark/scheduler/TaskSetManagerSuite.scala b/core/src/test/scala/org/apache/spark/scheduler/TaskSetManagerSuite.scala index d264ada..93a4b1f 100644 --- a/core/src/test/scala/org/apache/spark/scheduler/TaskSetManagerSuite.scala +++ b/core/src/test/scala/org/apache/spark/scheduler/TaskSetManagerSuite.scala @@ -1398,7 +1398,7 @@ class TaskSetManagerSuite extends SparkFunSuite with LocalSparkContext with Logg assert(taskSetManager1.isZombie) assert(taskSetManager1.runningTasks === 9) - val taskSet2 = FakeTask.createTaskSet(10, stageAttemptId = 1) + val taskSet2 = FakeTask.createTaskSet(10, stageId = 0, stageAttemptId = 1) 
sched.submitTasks(taskSet2) sched.resourceOffers( (11 until 20).map { idx => WorkerOffer(s"exec-$idx", s"host-$idx", 1) }) --------------------------------------------------------------------- To unsubscribe, e-mail: commits-unsubscr...@spark.apache.org For additional commands, e-mail: commits-h...@spark.apache.org