Github user skonto commented on a diff in the pull request:

    https://github.com/apache/spark/pull/11157#discussion_r60301695
  
    --- Diff: 
core/src/main/scala/org/apache/spark/scheduler/cluster/mesos/MesosSchedulerUtils.scala
 ---
    @@ -353,4 +371,247 @@ private[mesos] trait MesosSchedulerUtils extends 
Logging {
         
sc.conf.getTimeAsSeconds("spark.mesos.rejectOfferDurationForUnmetConstraints", 
"120s")
       }
     
    +  /**
    +   * Checks whether the executor ports fall within the offered list of 
port ranges.
    +   *
    +   * @param sc the Spark Context
    +   * @param ports the list of ports to check
    +   * @param takenPorts ports already used for that slave
    +   * @return true if ports are within range, false otherwise
    +   */
    +  protected def checkPorts(sc: SparkContext, ports: List[(Long, Long)],
    +                           takenPorts: List[Long] = List()): Boolean = {
    +
    +    def checkIfInRange(port: Int, ps: List[(Long, Long)]): Boolean = {
    +      ps.exists(r => r._1 <= port & r._2 >= port)
    +    }
    +
    +    val portsToCheck = List(sc.conf.getInt("spark.executor.port", 0),
    +      sc.conf.getInt("spark.blockManager.port", 0))
    +    val nonZeroPorts = portsToCheck.filter(_ != 0)
    +
    +    // If we require a port that is taken, we have to decline the offer 
since Mesos
    +    // shares all port ranges on the slave
    +    val contained = for {port <- nonZeroPorts}
    +      yield {
    +        takenPorts.contains(port)
    +      }
    +
    +    if (contained.contains(true)) {
    +      return false
    +    }
    +
    +    val withinRange = nonZeroPorts.forall(p => checkIfInRange(p, ports))
    +
    +    // make sure we have enough ports to allocate per offer
    +    ports.map(r => r._2 - r._1 + 1).sum >= portsToCheck.size && withinRange
    +  }
    +
    +  /**
    +   * Partitions port resources.
    +   *
    +   * @param conf the spark config
    +   * @param ports the ports offered
    +   * @return resources left, port resources to be used and the list of 
assigned ports
    +   */
    +  def partitionPorts(
    +      conf: SparkConf,
    +      ports: List[Resource]): (List[Resource], List[Resource], List[Long]) 
= {
    +
    +    val taskPortRanges = getRangeResourceWithRoleInfo(ports.asJava, 
"ports")
    +
    +    val portsToCheck = List(conf.getInt("spark.executor.port", 0).toLong,
    +      conf.getInt("spark.blockManager.port", 0).toLong)
    +
    +    val nonZeroPorts = portsToCheck.filter(_ != 0)
    +
    +    // reserve non zero ports first
    +
    +    val nonZeroResources = reservePorts(taskPortRanges, nonZeroPorts)
    +
    +    // reserve actual port numbers for zero ports - not set by the user
    +
    +    val numOfZeroPorts = portsToCheck.count(_ == 0)
    +
    +    val randPorts = pickRandomPortsFromRanges(nonZeroResources._1, 
numOfZeroPorts)
    +
    +    val zeroResources = reservePorts(nonZeroResources._1, randPorts)
    +
    +    val (resourcesLeft, resourcesToBeUsed) = 
createResources(nonZeroResources, zeroResources)
    +
    +    (resourcesLeft, resourcesToBeUsed, nonZeroPorts ++ randPorts)
    +  }
    +
    +  private def createResources(
    +      nonZero: (List[PortRangeResourceInfo], List[PortRangeResourceInfo]),
    +      zero: (List[PortRangeResourceInfo], List[PortRangeResourceInfo]))
    +      : (List[Resource], List[Resource]) = {
    +
    +    val resources = {
    +      if (nonZero._2.isEmpty) { // no user ports were defined
    +        (zero._1.flatMap{port => createMesosPortResource(port.value, 
Some(port.role))},
    +          zero._2.flatMap{port => createMesosPortResource(port.value, 
Some(port.role))})
    +
    +      } else if (zero._2.isEmpty) { // no random ports were defined
    +        (nonZero._1.flatMap{port => createMesosPortResource(port.value, 
Some(port.role))},
    +          nonZero._2.flatMap{port => createMesosPortResource(port.value, 
Some(port.role))})
    +      }
    +      else {  // we have user defined and random ports defined
    +        val left = zero._1.flatMap{port => 
createMesosPortResource(port.value, Some(port.role))}
    +
    +        val used = nonZero._2.flatMap{port =>
    +          createMesosPortResource(port.value, Some(port.role))} ++
    +          zero._2.flatMap{port => createMesosPortResource(port.value, 
Some(port.role))}
    +
    +        (left, used)
    +      }
    +    }
    +    resources
    +  }
    +
    +  private case class PortRangeResourceInfo(role: String, value: 
List[(Long, Long)])
    +
    +  private def getRangeResourceWithRoleInfo(res: JList[Resource], name: 
String)
    +      : List[PortRangeResourceInfo] = {
    +    // A resource can have multiple values in the offer since it can 
either be from
    +    // a specific role or wildcard.
    +    res.asScala.filter(_.getName == name)
    +      .map{res => PortRangeResourceInfo(res.getRole, 
res.getRanges.getRangeList.asScala
    +        .map(r => (r.getBegin, r.getEnd)).toList) }.toList
    +  }
    +
    +  private def reservePorts(
    +      availablePortRanges: List[PortRangeResourceInfo],
    +      wantedPorts: List[Long])
    +      : (List[PortRangeResourceInfo], List[PortRangeResourceInfo]) = {
    +
    +    if (wantedPorts.isEmpty) { // port list is empty; we didn't consume 
any resources
    +      return (availablePortRanges, List())
    +    }
    +
    +    var tmpLeft = availablePortRanges
    +    val tmpRanges = for {port <- wantedPorts}
    +      yield {
    +        val ret = findPortAndSplitRange(port, tmpLeft)
    +        val rangeToRemove = ret._1
    +        val diffRanges = tmpLeft.filterNot{r => r == rangeToRemove}
    +        val newRangesLeft = diffRanges ++ List(ret._2).flatMap(p => p)
    +        tmpLeft = newRangesLeft
    +        ret
    +      }
    +
    +    val rangesToRemove = tmpRanges.map(x => x._1)
    +    val test = availablePortRanges ++ tmpRanges.flatMap{x => x._2}
    +    val newRangesLeft = (availablePortRanges ++ tmpRanges.flatMap{x => 
x._2})
    +      .flatMap{r => removeRanges(r, rangesToRemove)}
    +
    +    val newRanges = tmpRanges.map{r => PortRangeResourceInfo(r._1.role, 
List((r._3, r._3)))}
    +
    +    (newRangesLeft, newRanges)
    +  }
    +
    +  private def removeRanges(
    +      rangeA: PortRangeResourceInfo,
    +      rangesToRemove: List[PortRangeResourceInfo])
    +      : Option[PortRangeResourceInfo] = {
    +
    +   val ranges = 
rangeA.value.filterNot(rangesToRemove.flatMap{_.value}.toSet)
    +
    +    if (ranges.isEmpty) {
    +        None
    +      } else {
    --- End diff --
    
    ok


---
If your project is set up for it, you can reply to this email and have your
reply appear on GitHub as well. If your project does not have this feature
enabled and wishes so, or if the feature is enabled but not working, please
contact infrastructure at infrastruct...@apache.org or file a JIRA ticket
with INFRA.
---

---------------------------------------------------------------------
To unsubscribe, e-mail: reviews-unsubscr...@spark.apache.org
For additional commands, e-mail: reviews-h...@spark.apache.org

Reply via email to