Github user andrewor14 commented on a diff in the pull request:

    https://github.com/apache/spark/pull/731#discussion_r23509679
  
    --- Diff: core/src/main/scala/org/apache/spark/deploy/master/Master.scala 
---
    @@ -502,43 +502,14 @@ private[spark] class Master(
        * launched an executor for the app on it (right now the standalone 
backend doesn't like having
        * two executors on the same worker).
        */
    -  def canUse(app: ApplicationInfo, worker: WorkerInfo): Boolean = {
    -    worker.memoryFree >= app.desc.memoryPerSlave && 
!worker.hasExecutor(app)
    +  private def canUse(app: ApplicationInfo, worker: WorkerInfo): Boolean = {
    +    worker.memoryFree >= app.desc.memoryPerExecutorMB && 
!worker.hasExecutor(app) &&
    +    worker.coresFree > 0
       }
     
    -  /**
    -   * Schedule the currently available resources among waiting apps. This 
method will be called
    -   * every time a new app joins or resource availability changes.
    -   */
    -  private def schedule() {
    -    if (state != RecoveryState.ALIVE) { return }
    -
    -    // First schedule drivers, they take strict precedence over 
applications
    -    // Randomization helps balance drivers
    -    val shuffledAliveWorkers = Random.shuffle(workers.toSeq.filter(_.state 
== WorkerState.ALIVE))
    -    val numWorkersAlive = shuffledAliveWorkers.size
    -    var curPos = 0
    -
    -    for (driver <- waitingDrivers.toList) { // iterate over a copy of 
waitingDrivers
    -      // We assign workers to each waiting driver in a round-robin 
fashion. For each driver, we
    -      // start from the last worker that was assigned a driver, and 
continue onwards until we have
    -      // explored all alive workers.
    -      var launched = false
    -      var numWorkersVisited = 0
    -      while (numWorkersVisited < numWorkersAlive && !launched) {
    -        val worker = shuffledAliveWorkers(curPos)
    -        numWorkersVisited += 1
    -        if (worker.memoryFree >= driver.desc.mem && worker.coresFree >= 
driver.desc.cores) {
    -          launchDriver(worker, driver)
    -          waitingDrivers -= driver
    -          launched = true
    -        }
    -        curPos = (curPos + 1) % numWorkersAlive
    -      }
    -    }
    -
    -    // Right now this is a very simple FIFO scheduler. We keep trying to 
fit in the first app
    -    // in the queue, then the second app, etc.
    +  // Right now this is a very simple FIFO scheduler. We keep trying to fit 
in the first app
    +  // in the queue, then the second app, etc.
    +  private def startSingleExecutorPerWorker() {
    --- End diff --
    
    This method should have a Javadoc-style doc comment, not a regular comment


---
If your project is set up for it, you can reply to this email and have your
reply appear on GitHub as well. If your project does not have this feature
enabled and wishes so, or if the feature is enabled but not working, please
contact infrastructure at infrastruct...@apache.org or file a JIRA ticket
with INFRA.
---

---------------------------------------------------------------------
To unsubscribe, e-mail: reviews-unsubscr...@spark.apache.org
For additional commands, e-mail: reviews-h...@spark.apache.org

Reply via email to