Repository: spark
Updated Branches:
  refs/heads/master 25cff1019 -> e537b33c6


[PySpark] Add blank lines to Python docstrings so example code renders correctly

Author: RJ Nowling <rnowl...@gmail.com>

Closes #1808 from rnowling/pyspark_docs and squashes the following commits:

c06d774 [RJ Nowling] Add blank lines to Python docstrings so example code renders correctly
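
For context, the pattern applied in every hunk below: reStructuredText, which
the PySpark API doc tooling uses, requires a blank line between a docstring's
prose and a `>>>` doctest block; without it, the example is run together with
the text instead of rendering as a code block. A minimal before/after sketch,
taken from one of the docstrings touched here (`sc` is the SparkContext that
PySpark's doctests assume is in scope; the method body is elided):

    # Before: no separating blank line, so the doctest renders inline
    def getNumPartitions(self):
        """
        Returns the number of partitions in RDD
        >>> rdd = sc.parallelize([1, 2, 3, 4], 2)
        >>> rdd.getNumPartitions()
        2
        """
        ...

    # After: the blank line makes the example render as a literal block
    def getNumPartitions(self):
        """
        Returns the number of partitions in RDD

        >>> rdd = sc.parallelize([1, 2, 3, 4], 2)
        >>> rdd.getNumPartitions()
        2
        """
        ...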


Project: http://git-wip-us.apache.org/repos/asf/spark/repo
Commit: http://git-wip-us.apache.org/repos/asf/spark/commit/e537b33c
Tree: http://git-wip-us.apache.org/repos/asf/spark/tree/e537b33c
Diff: http://git-wip-us.apache.org/repos/asf/spark/diff/e537b33c

Branch: refs/heads/master
Commit: e537b33c63d3fb373fe41deaa607d72e76e3906b
Parents: 25cff10
Author: RJ Nowling <rnowl...@gmail.com>
Authored: Wed Aug 6 14:12:21 2014 -0700
Committer: Xiangrui Meng <m...@databricks.com>
Committed: Wed Aug 6 14:12:21 2014 -0700

----------------------------------------------------------------------
 python/pyspark/rdd.py | 9 +++++++++
 1 file changed, 9 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/spark/blob/e537b33c/python/pyspark/rdd.py
----------------------------------------------------------------------
diff --git a/python/pyspark/rdd.py b/python/pyspark/rdd.py
index 30b834d..756e8f3 100644
--- a/python/pyspark/rdd.py
+++ b/python/pyspark/rdd.py
@@ -134,6 +134,7 @@ class MaxHeapQ(object):
 
     """
     An implementation of MaxHeap.
+
     >>> import pyspark.rdd
     >>> heap = pyspark.rdd.MaxHeapQ(5)
     >>> [heap.insert(i) for i in range(10)]
@@ -381,6 +382,7 @@ class RDD(object):
     def getNumPartitions(self):
         """
         Returns the number of partitions in RDD
+
         >>> rdd = sc.parallelize([1, 2, 3, 4], 2)
         >>> rdd.getNumPartitions()
         2
@@ -570,6 +572,7 @@ class RDD(object):
         """
         Sorts this RDD, which is assumed to consist of (key, value) pairs.
         # noqa
+
         >>> tmp = [('a', 1), ('b', 2), ('1', 3), ('d', 4), ('2', 5)]
         >>> sc.parallelize(tmp).sortByKey(True, 2).collect()
         [('1', 3), ('2', 5), ('a', 1), ('b', 2), ('d', 4)]
@@ -1209,6 +1212,7 @@ class RDD(object):
     def keys(self):
         """
         Return an RDD with the keys of each tuple.
+
         >>> m = sc.parallelize([(1, 2), (3, 4)]).keys()
         >>> m.collect()
         [1, 3]
@@ -1218,6 +1222,7 @@ class RDD(object):
     def values(self):
         """
         Return an RDD with the values of each tuple.
+
         >>> m = sc.parallelize([(1, 2), (3, 4)]).values()
         >>> m.collect()
         [2, 4]
@@ -1642,6 +1647,7 @@ class RDD(object):
          Internally, this uses a shuffle to redistribute data.
          If you are decreasing the number of partitions in this RDD, consider
          using `coalesce`, which can avoid performing a shuffle.
+
          >>> rdd = sc.parallelize([1,2,3,4,5,6,7], 4)
          >>> sorted(rdd.glom().collect())
          [[1], [2, 3], [4, 5], [6, 7]]
@@ -1656,6 +1662,7 @@ class RDD(object):
     def coalesce(self, numPartitions, shuffle=False):
         """
         Return a new RDD that is reduced into `numPartitions` partitions.
+
         >>> sc.parallelize([1, 2, 3, 4, 5], 3).glom().collect()
         [[1], [2, 3], [4, 5]]
         >>> sc.parallelize([1, 2, 3, 4, 5], 3).coalesce(1).glom().collect()
@@ -1694,6 +1701,7 @@ class RDD(object):
     def setName(self, name):
         """
         Assign a name to this RDD.
+
         >>> rdd1 = sc.parallelize([1,2])
         >>> rdd1.setName('RDD1')
         >>> rdd1.name()
@@ -1753,6 +1761,7 @@ class PipelinedRDD(RDD):
 
     """
     Pipelined maps:
+
     >>> rdd = sc.parallelize([1, 2, 3, 4])
     >>> rdd.map(lambda x: 2 * x).cache().map(lambda x: 2 * x).collect()
     [4, 8, 12, 16]
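
To check that the added blank lines do not break the doctests themselves, the
examples can be run against a local SparkContext, mirroring what PySpark's own
doctest harness does. A sketch, assuming pyspark is importable and a local
master is acceptable (the app name 'doctest-check' is arbitrary):

    import doctest

    import pyspark.rdd
    from pyspark.context import SparkContext

    # The doctests reference a global `sc`, so inject a local SparkContext.
    globs = pyspark.rdd.__dict__.copy()
    globs['sc'] = SparkContext('local', 'doctest-check')
    results = doctest.testmod(pyspark.rdd, globs=globs,
                              optionflags=doctest.ELLIPSIS)
    globs['sc'].stop()
    print('%d failures out of %d tests' % (results.failed, results.attempted))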

