This is an automated email from the ASF dual-hosted git repository.

srowen pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/spark.git


The following commit(s) were added to refs/heads/master by this push:
     new 720fe2f7e60 [MINOR] Fix typo `Exlude` to `Exclude` in `HealthTracker`
720fe2f7e60 is described below

commit 720fe2f7e6054ba25bd06fcc154127c74d057c5d
Author: sychen <syc...@ctrip.com>
AuthorDate: Tue Jan 31 08:13:44 2023 -0600

    [MINOR] Fix typo `Exlude` to `Exclude` in `HealthTracker`
    
    ### What changes were proposed in this pull request?
    Fix typo
    
    ### Why are the changes needed?
    Corrects the misspelled method name `getExludeOnFailureTimeout` to
    `getExcludeOnFailureTimeout` for readability and consistency.
    
    ### Does this PR introduce _any_ user-facing change?
    No
    
    ### How was this patch tested?
    Existing unit tests.
    
    Closes #39798 from cxzl25/typo_HealthTracker.
    
    Authored-by: sychen <syc...@ctrip.com>
    Signed-off-by: Sean Owen <sro...@gmail.com>
---
 .../scala/org/apache/spark/scheduler/HealthTracker.scala     | 12 ++++++------
 .../org/apache/spark/scheduler/HealthTrackerSuite.scala      |  6 +++---
 .../spark/deploy/yarn/YarnAllocatorNodeHealthTracker.scala   |  2 +-
 3 files changed, 10 insertions(+), 10 deletions(-)

diff --git a/core/src/main/scala/org/apache/spark/scheduler/HealthTracker.scala 
b/core/src/main/scala/org/apache/spark/scheduler/HealthTracker.scala
index 6bd5668651a..d7ddeade2fd 100644
--- a/core/src/main/scala/org/apache/spark/scheduler/HealthTracker.scala
+++ b/core/src/main/scala/org/apache/spark/scheduler/HealthTracker.scala
@@ -62,7 +62,7 @@ private[scheduler] class HealthTracker (
   HealthTracker.validateExcludeOnFailureConfs(conf)
   private val MAX_FAILURES_PER_EXEC = conf.get(config.MAX_FAILURES_PER_EXEC)
   private val MAX_FAILED_EXEC_PER_NODE = 
conf.get(config.MAX_FAILED_EXEC_PER_NODE)
-  val EXCLUDE_ON_FAILURE_TIMEOUT_MILLIS = 
HealthTracker.getExludeOnFailureTimeout(conf)
+  val EXCLUDE_ON_FAILURE_TIMEOUT_MILLIS = 
HealthTracker.getExcludeOnFailureTimeout(conf)
   private val EXCLUDE_FETCH_FAILURE_ENABLED =
     conf.get(config.EXCLUDE_ON_FAILURE_FETCH_FAILURE_ENABLED)
   private val EXCLUDE_ON_FAILURE_DECOMMISSION_ENABLED =
@@ -93,7 +93,7 @@ private[scheduler] class HealthTracker (
    * remove from this when executors are removed from spark, so we can track 
when we get multiple
    * successive excluded executors on one node.  Nonetheless, it will not grow 
too large because
    * there cannot be many excluded executors on one node, before we stop 
requesting more
-   * executors on that node, and we clean up the list of exluded executors 
once an executor has
+   * executors on that node, and we clean up the list of excluded executors 
once an executor has
    * been excluded for EXCLUDE_ON_FAILURE_TIMEOUT_MILLIS.
    */
   val nodeToExcludedExecs = new HashMap[String, HashSet[String]]()
@@ -110,7 +110,7 @@ private[scheduler] class HealthTracker (
       // Apply the timeout to excluded nodes and executors
       val execsToInclude = executorIdToExcludedStatus.filter(_._2.expiryTime < 
now).keys
       if (execsToInclude.nonEmpty) {
-        // Include any executors that have been exluded longer than the 
excludeOnFailure timeout.
+        // Include any executors that have been excluded longer than the 
excludeOnFailure timeout.
         logInfo(s"Removing executors $execsToInclude from exclude list because 
the " +
           s"the executors have reached the timed out")
         execsToInclude.foreach { exec =>
@@ -382,7 +382,7 @@ private[scheduler] class HealthTracker (
     /**
      * Apply the timeout to individual tasks.  This is to prevent one-off 
failures that are very
      * spread out in time (and likely have nothing to do with problems on the 
executor) from
-     * triggering exlusion.  However, note that we do *not* remove executors 
and nodes from
+     * triggering exclusion.  However, note that we do *not* remove executors 
and nodes from
      * being excluded as we expire individual task failures -- each have their 
own timeout.  E.g.,
      * suppose:
      *  * timeout = 10, maxFailuresPerExec = 2
@@ -447,7 +447,7 @@ private[spark] object HealthTracker extends Logging {
     }
   }
 
-  def getExludeOnFailureTimeout(conf: SparkConf): Long = {
+  def getExcludeOnFailureTimeout(conf: SparkConf): Long = {
     conf.get(config.EXCLUDE_ON_FAILURE_TIMEOUT_CONF).getOrElse {
       conf.get(config.EXCLUDE_ON_FAILURE_LEGACY_TIMEOUT_CONF).getOrElse {
         Utils.timeStringAsMs(DEFAULT_TIMEOUT)
@@ -484,7 +484,7 @@ private[spark] object HealthTracker extends Logging {
       }
     }
 
-    val timeout = getExludeOnFailureTimeout(conf)
+    val timeout = getExcludeOnFailureTimeout(conf)
     if (timeout <= 0) {
       // first, figure out where the timeout came from, to include the right 
conf in the message.
       conf.get(config.EXCLUDE_ON_FAILURE_TIMEOUT_CONF) match {
diff --git 
a/core/src/test/scala/org/apache/spark/scheduler/HealthTrackerSuite.scala 
b/core/src/test/scala/org/apache/spark/scheduler/HealthTrackerSuite.scala
index c098f8d9a02..e7a57c22ef6 100644
--- a/core/src/test/scala/org/apache/spark/scheduler/HealthTrackerSuite.scala
+++ b/core/src/test/scala/org/apache/spark/scheduler/HealthTrackerSuite.scala
@@ -427,10 +427,10 @@ class HealthTrackerSuite extends SparkFunSuite with 
MockitoSugar with LocalSpark
     assert(!HealthTracker.isExcludeOnFailureEnabled(conf))
     conf.set(config.EXCLUDE_ON_FAILURE_LEGACY_TIMEOUT_CONF, 5000L)
     assert(HealthTracker.isExcludeOnFailureEnabled(conf))
-    assert(5000 === HealthTracker.getExludeOnFailureTimeout(conf))
+    assert(5000 === HealthTracker.getExcludeOnFailureTimeout(conf))
     // the new conf takes precedence, though
     conf.set(config.EXCLUDE_ON_FAILURE_TIMEOUT_CONF, 1000L)
-    assert(1000 === HealthTracker.getExludeOnFailureTimeout(conf))
+    assert(1000 === HealthTracker.getExcludeOnFailureTimeout(conf))
 
     // if you explicitly set the legacy conf to 0, that also would disable 
excluding
     conf.set(config.EXCLUDE_ON_FAILURE_LEGACY_TIMEOUT_CONF, 0L)
@@ -438,7 +438,7 @@ class HealthTrackerSuite extends SparkFunSuite with 
MockitoSugar with LocalSpark
     // but again, the new conf takes precedence
     conf.set(config.EXCLUDE_ON_FAILURE_ENABLED, true)
     assert(HealthTracker.isExcludeOnFailureEnabled(conf))
-    assert(1000 === HealthTracker.getExludeOnFailureTimeout(conf))
+    assert(1000 === HealthTracker.getExcludeOnFailureTimeout(conf))
   }
 
   test("check exclude configuration invariants") {
diff --git 
a/resource-managers/yarn/src/main/scala/org/apache/spark/deploy/yarn/YarnAllocatorNodeHealthTracker.scala
 
b/resource-managers/yarn/src/main/scala/org/apache/spark/deploy/yarn/YarnAllocatorNodeHealthTracker.scala
index de9e1903614..bfe0face8c2 100644
--- 
a/resource-managers/yarn/src/main/scala/org/apache/spark/deploy/yarn/YarnAllocatorNodeHealthTracker.scala
+++ 
b/resource-managers/yarn/src/main/scala/org/apache/spark/deploy/yarn/YarnAllocatorNodeHealthTracker.scala
@@ -50,7 +50,7 @@ private[spark] class YarnAllocatorNodeHealthTracker(
     failureTracker: FailureTracker)
   extends Logging {
 
-  private val excludeOnFailureTimeoutMillis = 
HealthTracker.getExludeOnFailureTimeout(sparkConf)
+  private val excludeOnFailureTimeoutMillis = 
HealthTracker.getExcludeOnFailureTimeout(sparkConf)
 
   private val launchExcludeOnFailureEnabled =
     sparkConf.get(YARN_EXECUTOR_LAUNCH_EXCLUDE_ON_FAILURE_ENABLED)


---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscr...@spark.apache.org
For additional commands, e-mail: commits-h...@spark.apache.org

Reply via email to