This is an automated email from the ASF dual-hosted git repository.

gurwls223 pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/spark.git


The following commit(s) were added to refs/heads/master by this push:
     new dc68e4f7dc8f [SPARK-45965][DOCS][FOLLOW-UP] Document DSv2 partitioning expressions moved to functions.partitioning
dc68e4f7dc8f is described below

commit dc68e4f7dc8f3b24d0328cbf7c0120d93f595c91
Author: Hyukjin Kwon <gurwls...@apache.org>
AuthorDate: Sun Nov 19 19:24:48 2023 +0900

    [SPARK-45965][DOCS][FOLLOW-UP] Document DSv2 partitioning expressions moved to functions.partitioning
    
    ### What changes were proposed in this pull request?
    
    This PR is a follow-up of https://github.com/apache/spark/pull/43858 that documents the newly moved functions (see the usage sketch after this list):
    - `org.apache.spark.sql.functions.partitioning.*` becomes Scala-specific because these functions cannot be used from the Java side.
    - `org.apache.spark.sql.functions.*` remain Java-specific.
    - Document `pyspark.sql.functions.partitioning.*`, and remove the deprecated `pyspark.sql.functions.*` entries.
    
    ### Why are the changes needed?
    
    For users to use the moved functions.
    
    ### Does this PR introduce _any_ user-facing change?
    
    The main change has not been released yet, so no. This PR only adds the newly moved functions to the docs.
    
    ### How was this patch tested?
    
    Manually tested by building docs (Python).
    
    ### Was this patch authored or co-authored using generative AI tooling?
    
    No.
    
    Closes #43887 from HyukjinKwon/SPARK-45965-folowup.
    
    Authored-by: Hyukjin Kwon <gurwls...@apache.org>
    Signed-off-by: Hyukjin Kwon <gurwls...@apache.org>
---
 .../scala/org/apache/spark/sql/functions.scala     | 24 +++++++++++-----------
 .../source/reference/pyspark.sql/functions.rst     | 10 ++++-----
 .../scala/org/apache/spark/sql/functions.scala     | 24 +++++++++++-----------
 3 files changed, 29 insertions(+), 29 deletions(-)

diff --git a/connector/connect/client/jvm/src/main/scala/org/apache/spark/sql/functions.scala b/connector/connect/client/jvm/src/main/scala/org/apache/spark/sql/functions.scala
index 700f71dc6e40..ba6fe725ab2c 100644
--- a/connector/connect/client/jvm/src/main/scala/org/apache/spark/sql/functions.scala
+++ b/connector/connect/client/jvm/src/main/scala/org/apache/spark/sql/functions.scala
@@ -7545,7 +7545,7 @@ object functions {
   //////////////////////////////////////////////////////////////////////////////////////////////
 
   /**
-   * A transform for timestamps and dates to partition data into years.
+   * (Java-specific) A transform for timestamps and dates to partition data into years.
    *
    * @group partition_transforms
    * @since 3.4.0
@@ -7553,7 +7553,7 @@ object functions {
   def years(e: Column): Column = partitioning.years(e)
 
   /**
-   * A transform for timestamps and dates to partition data into months.
+   * (Java-specific) A transform for timestamps and dates to partition data into months.
    *
    * @group partition_transforms
    * @since 3.4.0
@@ -7561,7 +7561,7 @@ object functions {
   def months(e: Column): Column = partitioning.months(e)
 
   /**
-   * A transform for timestamps and dates to partition data into days.
+   * (Java-specific) A transform for timestamps and dates to partition data into days.
    *
    * @group partition_transforms
    * @since 3.4.0
@@ -7569,7 +7569,7 @@ object functions {
   def days(e: Column): Column = partitioning.days(e)
 
   /**
-   * A transform for timestamps to partition data into hours.
+   * (Java-specific) A transform for timestamps to partition data into hours.
    *
    * @group partition_transforms
    * @since 3.4.0
@@ -7856,7 +7856,7 @@ object functions {
   def make_ym_interval(): Column = Column.fn("make_ym_interval")
 
   /**
-   * A transform for any type that partitions by a hash of the input column.
+   * (Java-specific) A transform for any type that partitions by a hash of the input column.
    *
    * @group partition_transforms
    * @since 3.4.0
@@ -7864,7 +7864,7 @@ object functions {
   def bucket(numBuckets: Column, e: Column): Column = partitioning.bucket(numBuckets, e)
 
   /**
-   * A transform for any type that partitions by a hash of the input column.
+   * (Java-specific) A transform for any type that partitions by a hash of the input column.
    *
    * @group partition_transforms
    * @since 3.4.0
@@ -8409,7 +8409,7 @@ object functions {
   object partitioning {
     // scalastyle:on
     /**
-     * A transform for timestamps and dates to partition data into years.
+     * (Scala-specific) A transform for timestamps and dates to partition data into years.
      *
      * @group partition_transforms
      * @since 4.0.0
@@ -8417,7 +8417,7 @@ object functions {
     def years(e: Column): Column = Column.fn("years", e)
 
     /**
-     * A transform for timestamps and dates to partition data into months.
+     * (Scala-specific) A transform for timestamps and dates to partition data into months.
      *
      * @group partition_transforms
      * @since 4.0.0
@@ -8425,7 +8425,7 @@ object functions {
     def months(e: Column): Column = Column.fn("months", e)
 
     /**
-     * A transform for timestamps and dates to partition data into days.
+     * (Scala-specific) A transform for timestamps and dates to partition data into days.
      *
      * @group partition_transforms
      * @since 4.0.0
@@ -8433,7 +8433,7 @@ object functions {
     def days(e: Column): Column = Column.fn("days", e)
 
     /**
-     * A transform for timestamps to partition data into hours.
+     * (Scala-specific) A transform for timestamps to partition data into hours.
      *
      * @group partition_transforms
      * @since 4.0.0
@@ -8441,7 +8441,7 @@ object functions {
     def hours(e: Column): Column = Column.fn("hours", e)
 
     /**
-     * A transform for any type that partitions by a hash of the input column.
+     * (Scala-specific) A transform for any type that partitions by a hash of the input column.
      *
      * @group partition_transforms
      * @since 4.0.0
@@ -8449,7 +8449,7 @@ object functions {
     def bucket(numBuckets: Column, e: Column): Column = Column.fn("bucket", numBuckets, e)
 
     /**
-     * A transform for any type that partitions by a hash of the input column.
+     * (Scala-specific) A transform for any type that partitions by a hash of the input column.
      *
      * @group partition_transforms
      * @since 4.0.0
diff --git a/python/docs/source/reference/pyspark.sql/functions.rst b/python/docs/source/reference/pyspark.sql/functions.rst
index 4dc10cc15561..7ec71f8b4cc1 100644
--- a/python/docs/source/reference/pyspark.sql/functions.rst
+++ b/python/docs/source/reference/pyspark.sql/functions.rst
@@ -509,11 +509,11 @@ Partition Transformation Functions
 .. autosummary::
     :toctree: api/
 
-    years
-    months
-    days
-    hours
-    bucket
+    partitioning.years
+    partitioning.months
+    partitioning.days
+    partitioning.hours
+    partitioning.bucket
 
 
 CSV Functions
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/functions.scala b/sql/core/src/main/scala/org/apache/spark/sql/functions.scala
index 2576c9b08313..a52412e94ee5 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/functions.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/functions.scala
@@ -7263,7 +7263,7 @@ object functions {
   def to_xml(e: Column): Column = to_xml(e, Map.empty[String, String].asJava)
 
   /**
-   * A transform for timestamps and dates to partition data into years.
+   * (Java-specific) A transform for timestamps and dates to partition data into years.
    *
    * @group partition_transforms
    * @since 3.0.0
@@ -7271,7 +7271,7 @@ object functions {
   def years(e: Column): Column = partitioning.years(e)
 
   /**
-   * A transform for timestamps and dates to partition data into months.
+   * (Java-specific) A transform for timestamps and dates to partition data into months.
    *
    * @group partition_transforms
    * @since 3.0.0
@@ -7279,7 +7279,7 @@ object functions {
   def months(e: Column): Column = partitioning.months(e)
 
   /**
-   * A transform for timestamps and dates to partition data into days.
+   * (Java-specific) A transform for timestamps and dates to partition data into days.
    *
    * @group partition_transforms
    * @since 3.0.0
@@ -7374,7 +7374,7 @@ object functions {
     Column.fn("xpath_string", xml, path)
 
   /**
-   * A transform for timestamps to partition data into hours.
+   * (Java-specific) A transform for timestamps to partition data into hours.
    *
    * @group partition_transforms
    * @since 3.0.0
@@ -7657,7 +7657,7 @@ object functions {
   def make_ym_interval(): Column = Column.fn("make_ym_interval")
 
   /**
-   * A transform for any type that partitions by a hash of the input column.
+   * (Java-specific) A transform for any type that partitions by a hash of the input column.
    *
    * @group partition_transforms
    * @since 3.0.0
@@ -7665,7 +7665,7 @@ object functions {
   def bucket(numBuckets: Column, e: Column): Column = partitioning.bucket(numBuckets, e)
 
   /**
-   * A transform for any type that partitions by a hash of the input column.
+   * (Java-specific) A transform for any type that partitions by a hash of the input column.
    *
    * @group partition_transforms
    * @since 3.0.0
@@ -8288,7 +8288,7 @@ object functions {
   object partitioning {
   // scalastyle:on
     /**
-     * A transform for timestamps and dates to partition data into years.
+     * (Scala-specific) A transform for timestamps and dates to partition data into years.
      *
      * @group partition_transforms
      * @since 4.0.0
@@ -8296,7 +8296,7 @@ object functions {
     def years(e: Column): Column = withExpr { Years(e.expr) }
 
     /**
-     * A transform for timestamps and dates to partition data into months.
+     * (Scala-specific) A transform for timestamps and dates to partition data into months.
      *
      * @group partition_transforms
      * @since 4.0.0
@@ -8304,7 +8304,7 @@ object functions {
     def months(e: Column): Column = withExpr { Months(e.expr) }
 
     /**
-     * A transform for timestamps and dates to partition data into days.
+     * (Scala-specific) A transform for timestamps and dates to partition data into days.
      *
      * @group partition_transforms
      * @since 4.0.0
@@ -8312,7 +8312,7 @@ object functions {
     def days(e: Column): Column = withExpr { Days(e.expr) }
 
     /**
-     * A transform for timestamps to partition data into hours.
+     * (Scala-specific) A transform for timestamps to partition data into hours.
      *
      * @group partition_transforms
      * @since 4.0.0
@@ -8320,7 +8320,7 @@ object functions {
     def hours(e: Column): Column = withExpr { Hours(e.expr) }
 
     /**
-     * A transform for any type that partitions by a hash of the input column.
+     * (Scala-specific) A transform for any type that partitions by a hash of the input column.
      *
      * @group partition_transforms
      * @since 4.0.0
@@ -8335,7 +8335,7 @@ object functions {
     }
 
     /**
-     * A transform for any type that partitions by a hash of the input column.
+     * (Scala-specific) A transform for any type that partitions by a hash of the input column.
      *
      * @group partition_transforms
      * @since 4.0.0


---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscr...@spark.apache.org
For additional commands, e-mail: commits-h...@spark.apache.org
