Github user andrewor14 commented on a diff in the pull request:

    https://github.com/apache/spark/pull/42#discussion_r10504393
  
    --- Diff: core/src/main/scala/org/apache/spark/scheduler/DAGScheduler.scala ---
    @@ -54,87 +54,53 @@ import org.apache.spark.util.{MetadataCleaner, MetadataCleanerType, TimeStampedH
      */
     private[spark]
     class DAGScheduler(
    -    taskSched: TaskScheduler,
    +    taskScheduler: TaskScheduler,
    +    listenerBus: SparkListenerBus,
         mapOutputTracker: MapOutputTrackerMaster,
         blockManagerMaster: BlockManagerMaster,
         env: SparkEnv)
       extends Logging {
     
    -  def this(taskSched: TaskScheduler) {
    -    this(taskSched, SparkEnv.get.mapOutputTracker.asInstanceOf[MapOutputTrackerMaster],
    -      SparkEnv.get.blockManager.master, SparkEnv.get)
    -  }
    -  taskSched.setDAGScheduler(this)
    -
    -  // Called by TaskScheduler to report task's starting.
    -  def taskStarted(task: Task[_], taskInfo: TaskInfo) {
    -    eventProcessActor ! BeginEvent(task, taskInfo)
    -  }
    -
    -  // Called to report that a task has completed and results are being fetched remotely.
    -  def taskGettingResult(task: Task[_], taskInfo: TaskInfo) {
    -    eventProcessActor ! GettingResultEvent(task, taskInfo)
    -  }
    +  import DAGScheduler._
     
    -  // Called by TaskScheduler to report task completions or failures.
    -  def taskEnded(
    -      task: Task[_],
    -      reason: TaskEndReason,
    -      result: Any,
    -      accumUpdates: Map[Long, Any],
    -      taskInfo: TaskInfo,
    -      taskMetrics: TaskMetrics) {
    -    eventProcessActor ! CompletionEvent(task, reason, result, accumUpdates, taskInfo, taskMetrics)
    +  def this(sc: SparkContext, taskScheduler: TaskScheduler) = {
    +    this(
    +      taskScheduler,
    +      sc.listenerBus,
    +      sc.env.mapOutputTracker.asInstanceOf[MapOutputTrackerMaster],
    +      sc.env.blockManager.master,
    +      sc.env)
       }
     
    -  // Called by TaskScheduler when an executor fails.
    -  def executorLost(execId: String) {
    -    eventProcessActor ! ExecutorLost(execId)
    -  }
    -
    -  // Called by TaskScheduler when a host is added
    -  def executorGained(execId: String, host: String) {
    -    eventProcessActor ! ExecutorGained(execId, host)
    -  }
    -
    -  // Called by TaskScheduler to cancel an entire TaskSet due to either repeated failures or
    -  // cancellation of the job itself.
    -  def taskSetFailed(taskSet: TaskSet, reason: String) {
    -    eventProcessActor ! TaskSetFailed(taskSet, reason)
    -  }
    -
    -  // The time, in millis, to wait for fetch failure events to stop coming in after one is detected;
    -  // this is a simplistic way to avoid resubmitting tasks in the non-fetchable map stage one by one
    -  // as more failure events come in
    -  val RESUBMIT_TIMEOUT = 200.milliseconds
    -
    -  // The time, in millis, to wake up between polls of the completion queue in order to potentially
    -  // resubmit failed stages
    -  val POLL_TIMEOUT = 10L
    -
    -  // Warns the user if a stage contains a task with size greater than this value (in KB)
    -  val TASK_SIZE_TO_WARN = 100
    --- End diff ---
    
    These lines were moved elsewhere in the file, not deleted.
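    
    For reference: judging from the new `import DAGScheduler._` line above, these constants appear to have moved into a companion object. A minimal sketch of what that would look like, reconstructed from the removed lines rather than from the actual file:
    
        package org.apache.spark.scheduler
        
        import scala.concurrent.duration._  // provides the `.milliseconds` syntax used below
        
        // Hypothetical companion object layout; the exact placement is an
        // assumption inferred from the `import DAGScheduler._` added in this diff.
        private[spark] object DAGScheduler {
          // The time, in millis, to wait for fetch failure events to stop coming in after one is
          // detected; this is a simplistic way to avoid resubmitting tasks in the non-fetchable
          // map stage one by one as more failure events come in
          val RESUBMIT_TIMEOUT = 200.milliseconds
        
          // The time, in millis, to wake up between polls of the completion queue in order to
          // potentially resubmit failed stages
          val POLL_TIMEOUT = 10L
        
          // Warns the user if a stage contains a task with size greater than this value (in KB)
          val TASK_SIZE_TO_WARN = 100
        }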

