Github user mateiz commented on a diff in the pull request:

    https://github.com/apache/spark/pull/931#discussion_r13315133
  
    --- Diff: core/src/main/scala/org/apache/spark/rdd/OrderedRDDFunctions.scala ---
    @@ -17,54 +17,123 @@
     
     package org.apache.spark.rdd
     
    +import java.util.Comparator
    +
    +import scala.collection.mutable.ArrayBuffer
     import scala.reflect.ClassTag
     
    -import org.apache.spark.{Logging, RangePartitioner}
    +import org.apache.spark.{Logging, RangePartitioner, SparkEnv}
    +import org.apache.spark.util.collection.{ExternalAppendOnlyMap, AppendOnlyMap}
     
     /**
    - * Extra functions available on RDDs of (key, value) pairs where the key is sortable through
    - * an implicit conversion. Import `org.apache.spark.SparkContext._` at the top of your program to
    - * use these functions. They will work with any key type `K` that has an implicit `Ordering[K]` in
    - * scope.  Ordering objects already exist for all of the standard primitive types.  Users can also
    - * define their own orderings for custom types, or to override the default ordering.  The implicit
    - * ordering that is in the closest scope will be used.
    - *
    - * {{{
    - *   import org.apache.spark.SparkContext._
    - *
    - *   val rdd: RDD[(String, Int)] = ...
    - *   implicit val caseInsensitiveOrdering = new Ordering[String] {
    - *     override def compare(a: String, b: String) = a.toLowerCase.compare(b.toLowerCase)
    - *   }
    - *
    - *   // Sort by key, using the above case insensitive ordering.
    - *   rdd.sortByKey()
    - * }}}
    - */
    +  * Extra functions available on RDDs of (key, value) pairs where the key is sortable through
    +  * an implicit conversion. Import `org.apache.spark.SparkContext._` at the top of your program to
    +  * use these functions. They will work with any key type `K` that has an implicit `Ordering[K]` in
    +  * scope.  Ordering objects already exist for all of the standard primitive types.  Users can also
    +  * define their own orderings for custom types, or to override the default ordering.  The implicit
    +  * ordering that is in the closest scope will be used.
    +  *
    +  * {{{
    +  *   import org.apache.spark.SparkContext._
    +  *
    +  *   val rdd: RDD[(String, Int)] = ...
    +  *   implicit val caseInsensitiveOrdering = new Ordering[String] {
    +  *     override def compare(a: String, b: String) = a.toLowerCase.compare(b.toLowerCase)
    +  *   }
    +  *
    +  *   // Sort by key, using the above case insensitive ordering.
    +  *   rdd.sortByKey()
    +  * }}}
    +  */
    +
     class OrderedRDDFunctions[K : Ordering : ClassTag,
                               V: ClassTag,
                               P <: Product2[K, V] : ClassTag](
    -    self: RDD[P])
    -  extends Logging with Serializable {
    +  self: RDD[P])
    +extends Logging with Serializable {
     
       private val ordering = implicitly[Ordering[K]]
     
    +  private type SortCombiner = ArrayBuffer[V]
       /**
    -   * Sort the RDD by key, so that each partition contains a sorted range of the elements. Calling
    -   * `collect` or `save` on the resulting RDD will return or output an ordered list of records
    -   * (in the `save` case, they will be written to multiple `part-X` files in the filesystem, in
    -   * order of the keys).
    -   */
    +    * Sort the RDD by key, so that each partition contains a sorted range of the elements. Calling
    +    * `collect` or `save` on the resulting RDD will return or output an ordered list of records
    +    * (in the `save` case, they will be written to multiple `part-X` files in the filesystem, in
    +      * order of the keys).
    +    */
       def sortByKey(ascending: Boolean = true, numPartitions: Int = self.partitions.size): RDD[P] = {
    +    val externalSorting = SparkEnv.get.conf.getBoolean("spark.shuffle.spill", true)
         val part = new RangePartitioner(numPartitions, self, ascending)
         val shuffled = new ShuffledRDD[K, V, P](self, part)
    -    shuffled.mapPartitions(iter => {
    -      val buf = iter.toArray
    +        if (!externalSorting) {
    +          shuffled.mapPartitions(iter => {
    +              val buf = iter.toArray
    +              if (ascending) {
    +                buf.sortWith((x, y) => ordering.lt(x._1, y._1)).iterator
    +              } else {
    +                buf.sortWith((x, y) => ordering.gt(x._1, y._1)).iterator
    +              }
    +            }, preservesPartitioning = true)
    +        } else {
    +          shuffled.mapPartitions(iter => {
    +              val map = createExternalMap(ascending)
    +              while (iter.hasNext) { 
    +                val kv = iter.next()
    +                map.insert(kv._1, kv._2)
    +              }
    +              map.iterator
    +            }).flatMap(elem => {
    +              for (value <- elem._2)
    +                yield((elem._1, value).asInstanceOf[P])
    +            })
    +        }
    +  }
    +
    +  private def createExternalMap(ascending: Boolean) :ExternalAppendOnlyMap[K, V, SortCombiner] = {
    --- End diff ---
    
    Should be `): ExternalAppendOnlyMap`
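    
    For anyone skimming: in Scala the result-type annotation should hug the closing parenthesis (`): T`) rather than float after a space. Since the diff cuts off before the method body, here is a minimal sketch of what the corrected declaration could look like; the three combiner functions are an assumption modeled on the standard `ExternalAppendOnlyMap(createCombiner, mergeValue, mergeCombiners)` constructor, not the PR author's actual code, and the sketch relies on `K`, `V`, `SortCombiner`, and the imports already present in this file:
    
    ```scala
    // Sketch only: signature fixed per the comment above; body is assumed.
    private def createExternalMap(ascending: Boolean): ExternalAppendOnlyMap[K, V, SortCombiner] = {
      // Start a one-element buffer for the first value seen for a key.
      val createCombiner = (v: V) => ArrayBuffer(v)
      // Append later values for the same key to the existing buffer.
      val mergeValue = (buf: SortCombiner, v: V) => buf += v
      // Concatenate two buffers when spilled combiners are merged.
      val mergeCombiners = (b1: SortCombiner, b2: SortCombiner) => b1 ++= b2
      // Note: how `ascending` feeds a key ordering into the map is not
      // visible in this diff, so this sketch does not show it.
      new ExternalAppendOnlyMap[K, V, SortCombiner](createCombiner, mergeValue, mergeCombiners)
    }
    ```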

