This is an automated email from the ASF dual-hosted git repository.

srowen pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/spark.git


The following commit(s) were added to refs/heads/master by this push:
     new d9978fb  [SPARK-26860][PYSPARK][SPARKR] Fix for RangeBetween and RowsBetween docs to be in sync with Spark documentation
d9978fb is described below

commit d9978fb4e4d4de3a320b012373c18bd278462780
Author: Jagadesh Kiran <jagades...@in.verizon.com>
AuthorDate: Mon Mar 11 08:53:09 2019 -0500

    [SPARK-26860][PYSPARK][SPARKR] Fix for RangeBetween and RowsBetween docs to be in sync with Spark documentation
    
    The docs describing RangeBetween & RowsBetween for PySpark & SparkR are not in sync with the Spark description.
    
    a. Edited the PySpark and SparkR docs and made the descriptions identical for both RangeBetween and RowsBetween
    b. Created executable examples in both the PySpark and SparkR documentation
    c. Locally tested the patch with Scala style checks and unit tests, confirming no test-case failures
    
    Closes #23946 from jagadesh-kiran/master.
    
    Authored-by: Jagadesh Kiran <jagades...@in.verizon.com>
    Signed-off-by: Sean Owen <sean.o...@databricks.com>
---
 R/pkg/R/WindowSpec.R                               | 41 ++++++++++++++-
 python/pyspark/sql/window.py                       | 61 +++++++++++++++++++++-
 .../org/apache/spark/sql/expressions/Window.scala  |  4 +-
 3 files changed, 101 insertions(+), 5 deletions(-)

diff --git a/R/pkg/R/WindowSpec.R b/R/pkg/R/WindowSpec.R
index ee7f4ad..037809c 100644
--- a/R/pkg/R/WindowSpec.R
+++ b/R/pkg/R/WindowSpec.R
@@ -127,6 +127,16 @@ setMethod("orderBy",
 #' "0" means "current row", while "-1" means the row before the current row, 
and "5" means the
 #' fifth row after the current row.
 #'
+#' We recommend users use \code{Window.unboundedPreceding}, \code{Window.unboundedFollowing},
+#' and \code{Window.currentRow} to specify special boundary values, rather than using long values
+#' directly.
+#'
+#' A row-based boundary is based on the position of the row within the partition.
+#' An offset indicates the number of rows above or below the current row at which the frame
+#' for the current row starts or ends. For instance, given a row-based sliding frame with a
+#' lower bound offset of -1 and an upper bound offset of +2, the frame for the row with index 5
+#' would range from index 4 to index 7.
+#'
 #' @param x a WindowSpec
 #' @param start boundary start, inclusive.
 #'              The frame is unbounded if this is the minimum long value.
@@ -139,7 +149,14 @@ setMethod("orderBy",
 #' @family windowspec_method
 #' @examples
 #' \dontrun{
-#'   rowsBetween(ws, 0, 3)
+#'   id <- c(rep(1, 3), rep(2, 3), 3)
+#'   desc <- c('New', 'New', 'Good', 'New', 'Good', 'Good', 'New')
+#'   df <- data.frame(id, desc)
+#'   df <- createDataFrame(df)
+#'   w1 <- orderBy(windowPartitionBy('desc'), df$id)
+#'   w2 <- rowsBetween(w1, 0, 3)
+#'   df1 <- withColumn(df, "sum", over(sum(df$id), w2))
+#'   head(df1)
 #' }
 #' @note rowsBetween since 2.0.0
 setMethod("rowsBetween",
@@ -158,6 +175,19 @@ setMethod("rowsBetween",
 #' "current row", while "-1" means one off before the current row, and "5" 
means the five off
 #' after the current row.
 #'
+#' We recommend users use \code{Window.unboundedPreceding}, \code{Window.unboundedFollowing},
+#' and \code{Window.currentRow} to specify special boundary values, rather than using long values
+#' directly.
+#'
+#' A range-based boundary is based on the actual value of the ORDER BY
+#' expression(s). An offset is used to alter the value of the ORDER BY expression,
+#' for instance if the current ORDER BY expression has a value of 10 and the lower bound offset
+#' is -3, the resulting lower bound for the current row will be 10 - 3 = 7. This however puts a
+#' number of constraints on the ORDER BY expressions: there can be only one expression and this
+#' expression must have a numerical data type. An exception can be made when the offset is
+#' unbounded, because no value modification is needed; in this case multiple and non-numeric
+#' ORDER BY expressions are allowed.
+#'
 #' @param x a WindowSpec
 #' @param start boundary start, inclusive.
 #'              The frame is unbounded if this is the minimum long value.
@@ -170,7 +200,14 @@ setMethod("rowsBetween",
 #' @family windowspec_method
 #' @examples
 #' \dontrun{
-#'   rangeBetween(ws, 0, 3)
+#'   id <- c(rep(1, 3), rep(2, 3), 3)
+#'   desc <- c('New', 'New', 'Good', 'New', 'Good', 'Good', 'New')
+#'   df <- data.frame(id, desc)
+#'   df <- createDataFrame(df)
+#'   w1 <- orderBy(windowPartitionBy('desc'), df$id)
+#'   w2 <- rangeBetween(w1, 0, 3)
+#'   df1 <- withColumn(df, "sum", over(sum(df$id), w2))
+#'   head(df1)
 #' }
 #' @note rangeBetween since 2.0.0
 setMethod("rangeBetween",
diff --git a/python/pyspark/sql/window.py b/python/pyspark/sql/window.py
index e76563d..65c3ff5 100644
--- a/python/pyspark/sql/window.py
+++ b/python/pyspark/sql/window.py
@@ -97,6 +97,32 @@ class Window(object):
         and ``Window.currentRow`` to specify special boundary values, rather than using integral
         values directly.
 
+        A row-based boundary is based on the position of the row within the partition.
+        An offset indicates the number of rows above or below the current row at which the frame
+        for the current row starts or ends. For instance, given a row-based sliding frame with a
+        lower bound offset of -1 and an upper bound offset of +2, the frame for the row with
+        index 5 would range from index 4 to index 7.
+
+        >>> from pyspark.sql import Window
+        >>> from pyspark.sql import functions as func
+        >>> from pyspark.sql import SQLContext
+        >>> sc = SparkContext.getOrCreate()
+        >>> sqlContext = SQLContext(sc)
+        >>> tup = [(1, "a"), (1, "a"), (2, "a"), (1, "b"), (2, "b"), (3, "b")]
+        >>> df = sqlContext.createDataFrame(tup, ["id", "category"])
+        >>> window = Window.partitionBy("category").orderBy("id").rowsBetween(Window.currentRow, 1)
+        >>> df.withColumn("sum", func.sum("id").over(window)).show()
+        +---+--------+---+
+        | id|category|sum|
+        +---+--------+---+
+        |  1|       b|  3|
+        |  2|       b|  5|
+        |  3|       b|  3|
+        |  1|       a|  2|
+        |  1|       a|  3|
+        |  2|       a|  2|
+        +---+--------+---+
+
         :param start: boundary start, inclusive.
                       The frame is unbounded if this is ``Window.unboundedPreceding``, or
                       any value less than or equal to -9223372036854775808.
@@ -127,6 +153,35 @@ class Window(object):
         and ``Window.currentRow`` to specify special boundary values, rather than using integral
         values directly.
 
+        A range-based boundary is based on the actual value of the ORDER BY
+        expression(s). An offset is used to alter the value of the ORDER BY expression,
+        for instance if the current ORDER BY expression has a value of 10 and the lower bound
+        offset is -3, the resulting lower bound for the current row will be 10 - 3 = 7. This
+        however puts a number of constraints on the ORDER BY expressions: there can be only one
+        expression and this expression must have a numerical data type. An exception can be made
+        when the offset is unbounded, because no value modification is needed; in this case
+        multiple and non-numeric ORDER BY expressions are allowed.
+
+        >>> from pyspark.sql import Window
+        >>> from pyspark.sql import functions as func
+        >>> from pyspark.sql import SQLContext
+        >>> sc = SparkContext.getOrCreate()
+        >>> sqlContext = SQLContext(sc)
+        >>> tup = [(1, "a"), (1, "a"), (2, "a"), (1, "b"), (2, "b"), (3, "b")]
+        >>> df = sqlContext.createDataFrame(tup, ["id", "category"])
+        >>> window = Window.partitionBy("category").orderBy("id").rangeBetween(Window.currentRow, 1)
+        >>> df.withColumn("sum", func.sum("id").over(window)).show()
+        +---+--------+---+
+        | id|category|sum|
+        +---+--------+---+
+        |  1|       b|  3|
+        |  2|       b|  5|
+        |  3|       b|  3|
+        |  1|       a|  4|
+        |  1|       a|  4|
+        |  2|       a|  2|
+        +---+--------+---+
+
         :param start: boundary start, inclusive.
                       The frame is unbounded if this is ``Window.unboundedPreceding``, or
                       any value less than or equal to max(-sys.maxsize, -9223372036854775808).
@@ -231,8 +286,12 @@ class WindowSpec(object):
 
 def _test():
     import doctest
+    import pyspark.sql.window
     SparkContext('local[4]', 'PythonTest')
-    (failure_count, test_count) = doctest.testmod()
+    globs = pyspark.sql.window.__dict__.copy()
+    (failure_count, test_count) = doctest.testmod(
+        pyspark.sql.window, globs=globs,
+        optionflags=doctest.NORMALIZE_WHITESPACE)
     if failure_count:
         sys.exit(-1)
 
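A note on why the two doctest outputs above differ for category "a": with rowsBetween(Window.currentRow, 1) the frame is positional, so the first id=1 row aggregates itself and the next row only (1 + 1 = 2); with rangeBetween(Window.currentRow, 1) the frame is value-based, so the two id=1 rows are peers and each aggregates every row whose id falls in [1, 2], giving 1 + 1 + 2 = 4.
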
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/expressions/Window.scala b/sql/core/src/main/scala/org/apache/spark/sql/expressions/Window.scala
index 3d8d931..9a4ad44 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/expressions/Window.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/expressions/Window.scala
@@ -175,8 +175,8 @@ object Window {
    * directly.
    *
    * A range-based boundary is based on the actual value of the ORDER BY
-   * expression(s). An offset is used to alter the value of the ORDER BY expression, for
-   * instance if the current order by expression has a value of 10 and the lower bound offset
+   * expression(s). An offset is used to alter the value of the ORDER BY expression,
+   * for instance if the current ORDER BY expression has a value of 10 and the lower bound offset
    * is -3, the resulting lower bound for the current row will be 10 - 3 = 7. This however puts a
    * number of constraints on the ORDER BY expressions: there can be only one expression and this
    * expression must have a numerical data type. An exception can be made when the offset is

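The single-numeric-ORDER-BY constraint described in the Scala docs above can be seen directly. A minimal PySpark sketch (not part of this patch; it assumes a local SparkSession and that the restriction surfaces as an AnalysisException):

    from pyspark.sql import SparkSession, Window
    from pyspark.sql import functions as F
    from pyspark.sql.utils import AnalysisException

    spark = SparkSession.builder.master("local[2]").appName("range-demo").getOrCreate()
    df = spark.createDataFrame([(1, "a"), (2, "a"), (3, "b")], ["id", "category"])

    # Unbounded offsets need no value modification, so multiple (and
    # non-numeric) ORDER BY expressions are accepted here.
    w_ok = Window.orderBy("category", "id").rangeBetween(
        Window.unboundedPreceding, Window.currentRow)
    df.withColumn("s", F.sum("id").over(w_ok)).show()

    # A bounded offset must be added to the ORDER BY value, so more than
    # one ORDER BY expression is rejected when the plan is analyzed.
    w_bad = Window.orderBy("category", "id").rangeBetween(Window.currentRow, 1)
    try:
        df.withColumn("s", F.sum("id").over(w_bad)).show()
    except AnalysisException:
        print("rejected: bounded range frames need a single numeric ORDER BY")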
