Github user markhamstra commented on a diff in the pull request:

    https://github.com/apache/spark/pull/12060#discussion_r58467066
  
    --- Diff: core/src/main/scala/org/apache/spark/scheduler/DAGScheduler.scala 
---
    @@ -403,32 +403,47 @@ class DAGScheduler(
         parents.toList
       }
     
    -  /** Find ancestor shuffle dependencies that are not registered in 
shuffleToMapStage yet */
    -  private def getAncestorShuffleDependencies(rdd: RDD[_]): 
Stack[ShuffleDependency[_, _, _]] = {
    -    val parents = new Stack[ShuffleDependency[_, _, _]]
    +  /**
    +   * Find ancestor shuffle dependencies that are not registered in 
shuffleToMapStage yet.
    +   * This is done in topological order to create ancestor stages first to 
ensure that the result
    +   * stage graph is correctly built.
    +   */
    +  private def getAncestorShuffleDependencies(rdd: RDD[_]): 
Seq[ShuffleDependency[_, _, _]] = {
    +    val parents = new ArrayBuffer[ShuffleDependency[_, _, _]]
         val visited = new HashSet[RDD[_]]
         // We are manually maintaining a stack here to prevent 
StackOverflowError
         // caused by recursively visiting
         val waitingForVisit = new Stack[RDD[_]]
         def visit(r: RDD[_]) {
           if (!visited(r)) {
    -        visited += r
    -        for (dep <- r.dependencies) {
    -          dep match {
    -            case shufDep: ShuffleDependency[_, _, _] =>
    -              if (!shuffleToMapStage.contains(shufDep.shuffleId)) {
    -                parents.push(shufDep)
    -              }
    -            case _ =>
    +        val deps = r.dependencies.filter {
    +          case shufDep: ShuffleDependency[_, _, _] =>
    +            !shuffleToMapStage.contains(shufDep.shuffleId)
    +          case _ => true
    +        }
    +        if (deps.forall(dep => visited(dep.rdd))) {
    +          waitingForVisit.pop()
    +          visited += r
    +          for (dep <- deps) {
    +            dep match {
    +              case shufDep: ShuffleDependency[_, _, _] =>
    +                parents += shufDep
    +              case _ =>
    +            }
    +          }
    +        } else {
    +          for (dep <- deps if !visited(dep.rdd)) {
    +            waitingForVisit.push(dep.rdd)
               }
    -          waitingForVisit.push(dep.rdd)
             }
    +      } else {
    +        waitingForVisit.pop()
           }
         }
     
    --- End diff --
    
    This cuts down on the repeated scanning of data structures and should 
increase performance a little more (only lightly tested):
    ```scala
        def visit(r: RDD[_]) {
          if (visited(r)) {
            waitingForVisit.pop()
          } else {
            val visitedShuffleDeps = new ArrayBuffer[ShuffleDependency[_, _, _]]
            val unvisitedDeps = new ArrayBuffer[Dependency[_]]
    
            r.dependencies.foreach {
              case dep: ShuffleDependency[_, _, _] if 
!shuffleToMapStage.contains(dep.shuffleId) =>
                if (visited(dep.rdd)) visitedShuffleDeps += dep
                else unvisitedDeps += dep
              case dep if !visited(dep.rdd) => unvisitedDeps += dep
              case _ =>
            }
    
            if (unvisitedDeps.isEmpty) {
              waitingForVisit.pop()
              visited += r
              for (shufDep <- visitedShuffleDeps) { parents += shufDep }
            } else {
              for (dep <- unvisitedDeps) {
                waitingForVisit.push(dep.rdd)
              }
            }
          }
        }
    ```


---
If your project is set up for it, you can reply to this email and have your
reply appear on GitHub as well. If your project does not have this feature
enabled and wishes so, or if the feature is enabled but not working, please
contact infrastructure at infrastructure@apache.org or file a JIRA ticket
with INFRA.
---

---------------------------------------------------------------------
To unsubscribe, e-mail: reviews-unsubscribe@spark.apache.org
For additional commands, e-mail: reviews-help@spark.apache.org

Reply via email to