Github user mengxr commented on a diff in the pull request:

    https://github.com/apache/spark/pull/1562#discussion_r15439511
  
    --- Diff: core/src/main/scala/org/apache/spark/Partitioner.scala ---
    @@ -105,24 +108,91 @@ class RangePartitioner[K : Ordering : ClassTag, V](
     
       private var ordering = implicitly[Ordering[K]]
     
    +  @transient private[spark] var singlePass = true // for unit tests
    +
       // An array of upper bounds for the first (partitions - 1) partitions
       private var rangeBounds: Array[K] = {
         if (partitions == 1) {
    -      Array()
    +      Array.empty
         } else {
    -      val rddSize = rdd.count()
    -      val maxSampleSize = partitions * 20.0
    -      val frac = math.min(maxSampleSize / math.max(rddSize, 1), 1.0)
    -      val rddSample = rdd.sample(false, frac, 1).map(_._1).collect().sorted
    -      if (rddSample.length == 0) {
    -        Array()
    +      // This is the sample size we need to have roughly balanced output partitions.
    +      val sampleSize = 20.0 * partitions
    +      // Assume the input partitions are roughly balanced and over-sample a little bit.
    +      val sampleSizePerPartition = math.ceil(3.0 * sampleSize / rdd.partitions.size).toInt
    +      val shift = rdd.id
    +      val classTagK = classTag[K]
    +      val sketch = rdd.mapPartitionsWithIndex { (idx, iter) =>
    +        val seed = byteswap32(idx + shift)
    +        val (sample, n) = SamplingUtils.reservoirSampleAndCount(
    +          iter.map(_._1), sampleSizePerPartition, seed)(classTagK)
    +        Iterator((idx, n, sample))
    +      }.collect()
    +      var numItems = 0L
    +      sketch.foreach { case (_, n, _) =>
    +        numItems += n
    +      }
    +      if (numItems == 0L) {
    +        Array.empty
           } else {
    -        val bounds = new Array[K](partitions - 1)
    -        for (i <- 0 until partitions - 1) {
    -          val index = (rddSample.length - 1) * (i + 1) / partitions
    -          bounds(i) = rddSample(index)
    +        // If a partition contains much more than the average number of items, we re-sample from it
    +        // to ensure that enough items are collected from that partition.
    +        val fraction = math.min(sampleSize / math.max(numItems, 1L), 1.0)
    +        val candidates = ArrayBuffer.empty[(K, Float)]
    +        val imbalancedPartitions = ArrayBuffer.empty[Int]
    +        sketch.foreach { case (idx, n, sample) =>
    +          if (fraction * n > sampleSizePerPartition) {
    +            imbalancedPartitions += idx
    +          } else {
    +            // The weight is 1 over the sampling probability.
    +            val weight = (n.toDouble / sample.size).toFloat
    +            sample.foreach { key =>
    +              candidates += ((key, weight))
    +            }
    --- End diff --
    
    I think `foreach` should be faster than `for` here.
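
    For reference, a minimal standalone sketch of the two forms being compared
    (the names below are illustrative, not taken from the PR):

        import scala.collection.mutable.ArrayBuffer

        val sample = Array("a", "b", "c")
        val weight = 0.5f
        val candidates = ArrayBuffer.empty[(String, Float)]

        // foreach: a single higher-order call on the collection itself.
        sample.foreach { key =>
          candidates += ((key, weight))
        }

        // An indexed for comprehension, as in the replaced code, goes through
        // a Range generator before reaching the elements; the foreach form
        // above avoids that indirection.
        for (i <- 0 until sample.length) {
          candidates += ((sample(i), weight))
        }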

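    The sketching step in the diff relies on the Spark-internal
    `SamplingUtils.reservoirSampleAndCount`. A simplified, self-contained
    stand-in (naive reservoir sampling, not the actual Spark implementation)
    that also shows where the weight of 1 over the sampling probability comes
    from:

        import scala.util.Random

        // Keep a fixed-size uniform sample of a stream and also return how
        // many items were seen in total.
        def reservoirSampleAndCount[T: scala.reflect.ClassTag](
            input: Iterator[T], k: Int, seed: Long): (Array[T], Long) = {
          val rng = new Random(seed)
          val reservoir = new Array[T](k)
          var n = 0L
          input.foreach { item =>
            if (n < k) {
              reservoir(n.toInt) = item
            } else {
              // Keep the new item with probability k / (n + 1) by replacing
              // a uniformly chosen slot.
              val j = (rng.nextDouble() * (n + 1)).toLong
              if (j < k) reservoir(j.toInt) = item
            }
            n += 1
          }
          (if (n < k) reservoir.take(n.toInt) else reservoir, n)
        }

        val (sample, n) = reservoirSampleAndCount((1 to 1000).iterator, 60, 42L)
        // The weight is 1 over the sampling probability: each of the 60
        // sampled keys stands for roughly 1000 / 60 ~= 16.7 original items.
        val weight = (n.toDouble / sample.length).toFloat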
