Github user skonto commented on a diff in the pull request:

    https://github.com/apache/spark/pull/11157#discussion_r73508196
  
    --- Diff: core/src/main/scala/org/apache/spark/scheduler/cluster/mesos/MesosSchedulerUtils.scala ---
    @@ -357,4 +375,194 @@ private[mesos] trait MesosSchedulerUtils extends Logging {
         sc.conf.getTimeAsSeconds("spark.mesos.rejectOfferDurationForReachedMaxCores", "120s")
       }
     
    +  /**
    +   * Checks whether the executor ports are within the offered port ranges.
    +   *
    +   * @param conf the Spark config
    +   * @param ports the offered port ranges to check against
    +   * @return true if the ports are within the offered ranges, false otherwise
    +   */
    +  protected def checkPorts(conf: SparkConf, ports: List[(Long, Long)]): Boolean = {
    +
    +    def checkIfInRange(port: Long, ps: List[(Long, Long)]): Boolean = {
    +      ps.exists(r => r._1 <= port && r._2 >= port)
    +    }
    +
    +    val portsToCheck = nonZeroPortValuesFromConfig(conf)
    +    val withinRange = portsToCheck.forall(p => checkIfInRange(p, ports))
    +    // make sure we have enough ports to allocate per offer
    +    ports.map(r => r._2 - r._1 + 1).sum >= portsToCheck.size && withinRange
    +  }
    +
    +  /**
    +   * Partitions port resources.
    +   *
    +   * @param portsToAssign non-zero ports to assign
    +   * @param offeredResources the resources offered
    +   * @return the resources left and the port resources to be used
    +   */
    +  def partitionPortResources(portsToAssign: List[Long], offeredResources: List[Resource])
    +    : (List[Resource], List[Resource]) = {
    +    if (portsToAssign.isEmpty) {
    +      return (offeredResources, List[Resource]())
    +    }
    +    // partition port offers
    +    val (resourcesWithoutPorts, portResources) = filterPortResources(offeredResources)
    +    val offeredPortRanges = getRangeResourceWithRoleInfo(portResources.asJava, "ports")
    +    // reserve non-zero ports
    +    val nonZeroResources = reservePorts(offeredPortRanges, portsToAssign)
    +
    +    val (leftPortResources, assignedPortResources) =
    +      createResourcesFromAssignedPorts(nonZeroResources)
    +
    +    (resourcesWithoutPorts ++ leftPortResources, assignedPortResources)
    +  }
    +
    +  /**
    +   * Returns the known port names used by the executor process.
    +   * @return the list of port names
    +   */
    +  def managedPortNames(): List[String] = List("spark.executor.port", "spark.blockManager.port")
    +
    +  /**
    +   * The values of the non-zero ports to be used by the executor process.
    +   * @param conf the Spark config to use
    +   * @return the non-zero values of the ports
    +   */
    +  def nonZeroPortValuesFromConfig(conf: SparkConf): List[Long] = {
    +    managedPortNames().map(conf.getLong(_, 0)).filter(_ != 0)
    +  }
    +
    +  /**
    +   * Creates Mesos resources from the assigned non-zero ports.
    +   * Returns a tuple with the resources left (first member)
    +   * and the resources used (second member).
    +   */
    +  private def createResourcesFromAssignedPorts(
    +      nonZero: (List[PortRangeResourceInfo], List[PortRangeResourceInfo]))
    +    : (List[Resource], List[Resource]) = {
    +    (nonZero._1.flatMap { port => createMesosPortResource(port.range, Some(port.role)) },
    +      nonZero._2.flatMap { port => createMesosPortResource(port.range, Some(port.role)) })
    +  }
    +
    +  private case class PortRangeResourceInfo(role: String, range: List[(Long, Long)])
    --- End diff --
    
    ok
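    
    For anyone skimming the thread, a minimal, self-contained sketch of the
    semantics `checkPorts` and `nonZeroPortValuesFromConfig` implement. The
    offer ranges below are made up for illustration; only the two config keys
    from `managedPortNames()` come from the patch.
    
        import org.apache.spark.SparkConf
    
        // A conf with both managed ports set to non-zero values.
        val conf = new SparkConf(false)
          .set("spark.executor.port", "31000")
          .set("spark.blockManager.port", "31500")
    
        // What nonZeroPortValuesFromConfig(conf) would extract.
        val portsToCheck = List("spark.executor.port", "spark.blockManager.port")
          .map(conf.getLong(_, 0)).filter(_ != 0)     // List(31000, 31500)
    
        // A fabricated offer of two inclusive port ranges, standing in for the
        // ranges parsed out of a real Mesos offer.
        val offeredRanges = List((31000L, 31099L), (31400L, 31599L))
    
        // checkPorts semantics: every requested port falls in some offered range...
        val withinRange =
          portsToCheck.forall(p => offeredRanges.exists(r => r._1 <= p && p <= r._2))
        // ...and the offer holds at least as many ports as we request.
        val enoughPorts =
          offeredRanges.map(r => r._2 - r._1 + 1).sum >= portsToCheck.size
    
        assert(withinRange && enoughPorts)   // holds for the values above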

