This is an automated email from the ASF dual-hosted git repository. srowen pushed a commit to branch master in repository https://gitbox.apache.org/repos/asf/spark.git
The following commit(s) were added to refs/heads/master by this push: new 1495ad8 [SPARK-33991][CORE][WEBUI] Repair enumeration conversion error for AllJobsPage 1495ad8 is described below commit 1495ad8c46197916527236331b57dce93aa3b8ec Author: yikf <13468507...@163.com> AuthorDate: Mon Jan 11 08:48:02 2021 -0600 [SPARK-33991][CORE][WEBUI] Repair enumeration conversion error for AllJobsPage ### What changes were proposed in this pull request? For the `AllJobsPage` class, `AllJobsPage` gets the schedulingMode of enumerated type by loading the `spark.scheduler.mode` configuration from SparkConf, but an enumeration conversion error occurs when I set the value of this configuration to lowercase. The reason for this problem is that the values of the `SchedulingMode` enumeration class are uppercase, so the error occurs when I configure `spark.scheduler.mode` to be lowercase. I saw that the `org.apache.spark.scheduler.TaskSchedulerImpl` class converts the `spark.scheduler.mode` value to uppercase, so I think it should be converted in `AllJobsPage` as well. ### Why are the changes needed? An enumeration conversion error occurred in Spark when I set the value of this configuration to lowercase. ### How was this patch tested? Existing tests. Closes #31015 from yikf/master. 
Authored-by: yikf <13468507...@163.com> Signed-off-by: Sean Owen <sro...@gmail.com> --- .../org/apache/spark/ui/jobs/AllJobsPage.scala | 6 ++++-- .../scala/org/apache/spark/ui/UISeleniumSuite.scala | 21 +++++++++++++++++++++ 2 files changed, 25 insertions(+), 2 deletions(-) diff --git a/core/src/main/scala/org/apache/spark/ui/jobs/AllJobsPage.scala b/core/src/main/scala/org/apache/spark/ui/jobs/AllJobsPage.scala index cfe15eb..94821e4 100644 --- a/core/src/main/scala/org/apache/spark/ui/jobs/AllJobsPage.scala +++ b/core/src/main/scala/org/apache/spark/ui/jobs/AllJobsPage.scala @@ -19,7 +19,7 @@ package org.apache.spark.ui.jobs import java.net.URLEncoder import java.nio.charset.StandardCharsets.UTF_8 -import java.util.Date +import java.util.{Date, Locale} import javax.servlet.http.HttpServletRequest import scala.collection.mutable.ListBuffer @@ -277,15 +277,17 @@ private[ui] class AllJobsPage(parent: JobsTab, store: AppStatusStore) extends We s"${appSummary.numCompletedJobs}, only showing ${completedJobs.size}" } + // SPARK-33991 Avoid enumeration conversion error. 
val schedulingMode = store.environmentInfo().sparkProperties.toMap .get(SCHEDULER_MODE.key) - .map { mode => SchedulingMode.withName(mode).toString } + .map { mode => SchedulingMode.withName(mode.toUpperCase(Locale.ROOT)).toString } .getOrElse("Unknown") val summary: NodeSeq = <div> <ul class="list-unstyled"> <li> + <strong>User:</strong> {parent.getSparkUser} </li> diff --git a/core/src/test/scala/org/apache/spark/ui/UISeleniumSuite.scala b/core/src/test/scala/org/apache/spark/ui/UISeleniumSuite.scala index d7caeaa..d10260e 100644 --- a/core/src/test/scala/org/apache/spark/ui/UISeleniumSuite.scala +++ b/core/src/test/scala/org/apache/spark/ui/UISeleniumSuite.scala @@ -123,6 +123,27 @@ class UISeleniumSuite extends SparkFunSuite with WebBrowser with Matchers with B sc } + test("all jobs page should be rendered even though we configure the scheduling mode to fair") { + // Regression test for SPARK-33991 + val conf = Map("spark.scheduler.mode" -> "fair") + withSpark(newSparkContext(additionalConfs = conf)) { sc => + val rdd = sc.parallelize(0 to 100, 100).repartition(10).cache() + rdd.count() + + eventually(timeout(5.seconds), interval(50.milliseconds)) { + goToUi(sc, "/jobs") + // The completed jobs table should have one row. 
The first row will be the most recent job: + val firstRow = find(cssSelector("tbody tr")).get.underlying + val firstRowColumns = firstRow.findElements(By.tagName("td")) + // if first row can get the id 0, then the page is rendered and the scheduling mode is + // displayed with no error when we visit http://localhost:4040/jobs/ even though + // we configure the scheduling mode like spark.scheduler.mode=fair + // instead of spark.scheculer.mode=FAIR + firstRowColumns.get(0).getText should be ("0") + } + } + } + test("effects of unpersist() / persist() should be reflected") { // Regression test for SPARK-2527 withSpark(newSparkContext()) { sc => --------------------------------------------------------------------- To unsubscribe, e-mail: commits-unsubscr...@spark.apache.org For additional commands, e-mail: commits-h...@spark.apache.org