GitHub user kayousterhout commented on a diff in the pull request:

    https://github.com/apache/spark/pull/3779#discussion_r24065873
  
    --- Diff: core/src/main/scala/org/apache/spark/scheduler/TaskSetManager.scala ---
    @@ -506,13 +506,59 @@ private[spark] class TaskSetManager(
        * Get the level at which we can launch tasks according to delay scheduling, based on the current wait time.
        */
       private def getAllowedLocalityLevel(curTime: Long): TaskLocality.TaskLocality = {
    -    while (curTime - lastLaunchTime >= localityWaits(currentLocalityIndex) &&
    -        currentLocalityIndex < myLocalityLevels.length - 1)
    -    {
    -      // Jump to the next locality level, and remove our waiting time for the current one since
    -      // we don't want to count it again on the next one
    -      lastLaunchTime += localityWaits(currentLocalityIndex)
    -      currentLocalityIndex += 1
    +    // Remove the scheduled or finished tasks lazily
    +    def hasNotScheduledTasks(taskIndexes: ArrayBuffer[Int]): Boolean = {
    +      var indexOffset = taskIndexes.size
    +      while (indexOffset > 0) {
    +        indexOffset -= 1
    +        val index = taskIndexes(indexOffset)
    +        if (copiesRunning(index) == 0 && !successful(index)) {
    +          return true
    +        } else {
    +          taskIndexes.remove(indexOffset)
    +        }
    +      }
    +      false
    +    }
    +    // Returns true if any location still has tasks to schedule; removes
    +    // entries whose lists turn out to be empty after the check
    +    def hasMoreTasks(pendingTasks: HashMap[String, ArrayBuffer[Int]]): Boolean = {
    +      val emptyKeys = new ArrayBuffer[String]
    +      val hasTasks = pendingTasks.exists {
    +        case (id: String, tasks: ArrayBuffer[Int]) =>
    +          if (hasNotScheduledTasks(tasks)) {
    +            true
    +          } else {
    +            emptyKeys += id
    +            false
    +          }
    +      }
    +      emptyKeys.foreach(x => pendingTasks.remove(x))
    +      hasTasks
    +    }
    +
    +    while (currentLocalityIndex < myLocalityLevels.length - 1) {
    +      val moreTasks = myLocalityLevels(currentLocalityIndex) match {
    +      case TaskLocality.PROCESS_LOCAL => hasMoreTasks(pendingTasksForExecutor)
    +        case TaskLocality.NODE_LOCAL => hasMoreTasks(pendingTasksForHost)
    +      case TaskLocality.NO_PREF => pendingTasksWithNoPrefs.nonEmpty
    +        case TaskLocality.RACK_LOCAL => hasMoreTasks(pendingTasksForRack)
    +      }
    +      if (!moreTasks) {
    +        // Move to the next locality level if there are no tasks for the current level
    --- End diff ---
    
    Add something like: "This is a performance optimization: if there are no 
more tasks that can be scheduled at a particular locality level, there is no 
point in waiting for the locality wait timeout (SPARK-4939)."
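
    To make the helpers above easier to follow, here is a minimal, self-contained sketch of the lazy-cleanup idea, using stand-in state (copiesRunning, finished) and hypothetical names (DelaySchedulingSketch, hasRunnableTask) rather than TaskSetManager's actual fields; it illustrates the technique, not the patch itself:

        import scala.collection.mutable.{ArrayBuffer, HashMap}

        object DelaySchedulingSketch {
          // Stand-ins for TaskSetManager state: task 0 is running, task 1 has
          // finished, and task 2 still needs to be scheduled.
          val copiesRunning = Array(1, 0, 0)
          val finished = Array(false, true, false)

          // Scan a pending list from the back, pruning tasks that are already
          // running or finished; return true once a schedulable task is found.
          def hasRunnableTask(taskIndexes: ArrayBuffer[Int]): Boolean = {
            var offset = taskIndexes.size
            while (offset > 0) {
              offset -= 1
              val index = taskIndexes(offset)
              if (copiesRunning(index) == 0 && !finished(index)) {
                return true
              }
              taskIndexes.remove(offset) // lazy cleanup
            }
            false
          }

          // Check each location's pending list, dropping map entries that turn
          // out to be empty, and report whether any runnable task remains.
          def hasMoreTasks(pending: HashMap[String, ArrayBuffer[Int]]): Boolean = {
            val emptyKeys = new ArrayBuffer[String]
            val hasTasks = pending.exists { case (id, tasks) =>
              if (hasRunnableTask(tasks)) true else { emptyKeys += id; false }
            }
            emptyKeys.foreach(pending.remove)
            hasTasks
          }

          def main(args: Array[String]): Unit = {
            val exhausted = HashMap("host1" -> ArrayBuffer(0, 1))
            println(hasMoreTasks(exhausted)) // false: nothing left to schedule
            println(exhausted.isEmpty)       // true: the empty entry was pruned

            val stillPending = HashMap("host2" -> ArrayBuffer(2))
            println(hasMoreTasks(stillPending)) // true: task 2 is schedulable
          }
        }

    In the patch, the outer while loop asks this question once per locality level and advances currentLocalityIndex as soon as a level has nothing left to run, instead of sitting out that level's locality wait.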

