This is an automated email from the ASF dual-hosted git repository.

maxgekk pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/spark.git


The following commit(s) were added to refs/heads/master by this push:
     new 981312284f0 [SPARK-45188][SQL][DOCS] Update error messages related to parameterized `sql()`
981312284f0 is described below

commit 981312284f0776ca847c8d21411f74a72c639b22
Author: Max Gekk <max.g...@gmail.com>
AuthorDate: Tue Sep 19 00:22:43 2023 +0300

    [SPARK-45188][SQL][DOCS] Update error messages related to parameterized `sql()`
    
    ### What changes were proposed in this pull request?
    In this PR, I propose to update some error message formats and comments regarding `sql()` parameters: maps, arrays and structs can now be used as `sql()` parameters. The new behaviour was added by https://github.com/apache/spark/pull/42752.
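    
    For illustration, a minimal hypothetical sketch of the new capability (the query, parameter names and values below are illustrative only, not taken from this patch):
    ```
    import org.apache.spark.sql.functions.{array, lit, map}
    
    // Assumes an active SparkSession `spark`. Named parameters can now be
    // bound to collection constructor functions, not only to plain literals.
    val df = spark.sql(
      "SELECT element_at(:m, 'a') AS v, element_at(:arr, 1) AS first",
      Map(
        "m" -> map(lit("a"), lit(1)),       // map() constructor as an argument
        "arr" -> array(lit(10), lit(20))))  // array() constructor as an argument
    df.show()
    ```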
    
    ### Why are the changes needed?
    To inform users about recent changes introduced by SPARK-45033.
    
    ### Does this PR introduce _any_ user-facing change?
    No.
    
    ### How was this patch tested?
    By running the affected test suite:
    ```
    $ build/sbt "core/testOnly *SparkThrowableSuite"
    ```
    
    ### Was this patch authored or co-authored using generative AI tooling?
    No.
    
    Closes #42957 from MaxGekk/clean-ClientE2ETestSuite.
    
    Authored-by: Max Gekk <max.g...@gmail.com>
    Signed-off-by: Max Gekk <max.g...@gmail.com>
---
 .../src/main/resources/error/error-classes.json      |  4 ++--
 .../scala/org/apache/spark/sql/SparkSession.scala    | 11 +++++++----
 docs/sql-error-conditions.md                         |  4 ++--
 python/pyspark/pandas/sql_formatter.py               |  3 ++-
 python/pyspark/sql/session.py                        |  3 ++-
 .../spark/sql/catalyst/analysis/parameters.scala     | 14 +++++++++-----
 .../scala/org/apache/spark/sql/SparkSession.scala    | 20 ++++++++++----------
 7 files changed, 34 insertions(+), 25 deletions(-)

diff --git a/common/utils/src/main/resources/error/error-classes.json b/common/utils/src/main/resources/error/error-classes.json
index 4740ed72f89..186e7b4640d 100644
--- a/common/utils/src/main/resources/error/error-classes.json
+++ b/common/utils/src/main/resources/error/error-classes.json
@@ -1892,7 +1892,7 @@
   },
   "INVALID_SQL_ARG" : {
     "message" : [
-      "The argument <name> of `sql()` is invalid. Consider to replace it by a 
SQL literal."
+      "The argument <name> of `sql()` is invalid. Consider to replace it 
either by a SQL literal or by collection constructor functions such as `map()`, 
`array()`, `struct()`."
     ]
   },
   "INVALID_SQL_SYNTAX" : {
@@ -2768,7 +2768,7 @@
   },
   "UNBOUND_SQL_PARAMETER" : {
     "message" : [
-      "Found the unbound parameter: <name>. Please, fix `args` and provide a 
mapping of the parameter to a SQL literal."
+      "Found the unbound parameter: <name>. Please, fix `args` and provide a 
mapping of the parameter to either a SQL literal or collection constructor 
functions such as `map()`, `array()`, `struct()`."
     ],
     "sqlState" : "42P02"
   },
diff --git a/connector/connect/client/jvm/src/main/scala/org/apache/spark/sql/SparkSession.scala b/connector/connect/client/jvm/src/main/scala/org/apache/spark/sql/SparkSession.scala
index 8788e34893e..5aa8c5a2bd5 100644
--- a/connector/connect/client/jvm/src/main/scala/org/apache/spark/sql/SparkSession.scala
+++ b/connector/connect/client/jvm/src/main/scala/org/apache/spark/sql/SparkSession.scala
@@ -235,8 +235,9 @@ class SparkSession private[sql] (
    *   An array of Java/Scala objects that can be converted to SQL literal expressions. See <a
    *   href="https://spark.apache.org/docs/latest/sql-ref-datatypes.html"> Supported Data
    *   Types</a> for supported value types in Scala/Java. For example: 1, "Steven",
-   *   LocalDate.of(2023, 4, 2). A value can be also a `Column` of literal expression, in that
-   *   case it is taken as is.
+   *   LocalDate.of(2023, 4, 2). A value can be also a `Column` of a literal or collection
+   *   constructor functions such as `map()`, `array()`, `struct()`, in that case it is taken as
+   *   is.
    *
    * @since 3.5.0
    */
@@ -272,7 +273,8 @@ class SparkSession private[sql] (
    *   expressions. See <a href="https://spark.apache.org/docs/latest/sql-ref-datatypes.html">
    *   Supported Data Types</a> for supported value types in Scala/Java. For example, map keys:
    *   "rank", "name", "birthdate"; map values: 1, "Steven", LocalDate.of(2023, 4, 2). Map value
-   *   can be also a `Column` of literal expression, in that case it is taken as is.
+   *   can be also a `Column` of a literal or collection constructor functions such as `map()`,
+   *   `array()`, `struct()`, in that case it is taken as is.
    *
    * @since 3.4.0
    */
@@ -292,7 +294,8 @@ class SparkSession private[sql] (
    *   expressions. See <a href="https://spark.apache.org/docs/latest/sql-ref-datatypes.html">
    *   Supported Data Types</a> for supported value types in Scala/Java. For example, map keys:
    *   "rank", "name", "birthdate"; map values: 1, "Steven", LocalDate.of(2023, 4, 2). Map value
-   *   can be also a `Column` of literal expression, in that case it is taken as is.
+   *   can be also a `Column` of a literal or collection constructor functions such as `map()`,
+   *   `array()`, `struct()`, in that case it is taken as is.
    *
    * @since 3.4.0
    */
diff --git a/docs/sql-error-conditions.md b/docs/sql-error-conditions.md
index 444c2b7c0d1..4f982e52bc8 100644
--- a/docs/sql-error-conditions.md
+++ b/docs/sql-error-conditions.md
@@ -1113,7 +1113,7 @@ Expected format is 'SET', 'SET key', or 'SET key=value'. If you want to include
 
 SQLSTATE: none assigned
 
-The argument `<name>` of `sql()` is invalid. Consider to replace it by a SQL literal.
+The argument `<name>` of `sql()` is invalid. Consider to replace it either by a SQL literal or by collection constructor functions such as `map()`, `array()`, `struct()`.
 
### [INVALID_SQL_SYNTAX](sql-error-conditions-invalid-sql-syntax-error-class.html)
 
@@ -1835,7 +1835,7 @@ Unable to infer schema for `<format>`. It must be specified manually.
 
[SQLSTATE: 42P02](sql-error-conditions-sqlstates.html#class-42-syntax-error-or-access-rule-violation)
 
-Found the unbound parameter: `<name>`. Please, fix `args` and provide a mapping of the parameter to a SQL literal.
+Found the unbound parameter: `<name>`. Please, fix `args` and provide a mapping of the parameter to either a SQL literal or collection constructor functions such as `map()`, `array()`, `struct()`.
 
 ### UNCLOSED_BRACKETED_COMMENT
 
diff --git a/python/pyspark/pandas/sql_formatter.py b/python/pyspark/pandas/sql_formatter.py
index 8593703bd94..91c4f0b7d77 100644
--- a/python/pyspark/pandas/sql_formatter.py
+++ b/python/pyspark/pandas/sql_formatter.py
@@ -108,7 +108,8 @@ def sql(
         Supported Data Types</a> for supported value types in Python.
         For example, dictionary keys: "rank", "name", "birthdate";
         dictionary values: 1, "Steven", datetime.date(2023, 4, 2).
-        A value can be also a `Column` of literal expression, in that case it is taken as is.
+        A value can be also a `Column` of a literal or collection constructor functions such
+        as `map()`, `array()`, `struct()`, in that case it is taken as is.
 
 
         .. versionadded:: 3.4.0
diff --git a/python/pyspark/sql/session.py b/python/pyspark/sql/session.py
index 1895bf32ccf..dc4f8f321a5 100644
--- a/python/pyspark/sql/session.py
+++ b/python/pyspark/sql/session.py
@@ -1518,7 +1518,8 @@ class SparkSession(SparkConversionMixin):
             Supported Data Types</a> for supported value types in Python.
             For example, dictionary keys: "rank", "name", "birthdate";
             dictionary or list values: 1, "Steven", datetime.date(2023, 4, 2).
-            A value can be also a `Column` of literal expression, in that case it is taken as is.
+            A value can be also a `Column` of a literal or collection constructor functions such
+            as `map()`, `array()`, `struct()`, in that case it is taken as is.
 
             .. versionadded:: 3.4.0
 
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/parameters.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/parameters.scala
index a6072dcdd2c..66da63f363f 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/parameters.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/parameters.scala
@@ -41,14 +41,16 @@ sealed trait Parameter extends LeafExpression with Unevaluable {
 }
 
 /**
- * The expression represents a named parameter that should be replaced by a literal.
+ * The expression represents a named parameter that should be replaced by a literal or
+ * collection constructor functions such as `map()`, `array()`, `struct()`.
  *
  * @param name The identifier of the parameter without the marker.
  */
 case class NamedParameter(name: String) extends Parameter
 
 /**
- * The expression represents a positional parameter that should be replaced by a literal.
+ * The expression represents a positional parameter that should be replaced by a literal or
+ * by collection constructor functions such as `map()`, `array()`, `struct()`.
  *
  * @param pos An unique position of the parameter in a SQL query text.
  */
@@ -92,7 +94,8 @@ object NameParameterizedQuery {
  * The logical plan representing a parameterized query with positional parameters.
  *
  * @param child The parameterized logical plan.
- * @param args The literal values of positional parameters.
+ * @param args The literal values or collection constructor functions such as `map()`,
+ *             `array()`, `struct()` of positional parameters.
  */
 case class PosParameterizedQuery(child: LogicalPlan, args: Seq[Expression])
   extends ParameterizedQuery(child) {
@@ -102,8 +105,9 @@ case class PosParameterizedQuery(child: LogicalPlan, args: Seq[Expression])
 }
 
 /**
- * Finds all named parameters in `ParameterizedQuery` and substitutes them by literals from the
- * user-specified arguments.
+ * Finds all named parameters in `ParameterizedQuery` and substitutes them by literals or
+ * by collection constructor functions such as `map()`, `array()`, `struct()`
+ * from the user-specified arguments.
  */
 object BindParameters extends Rule[LogicalPlan] with QueryErrorsBase {
   private def checkArgs(args: Iterable[(String, Expression)]): Unit = {
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/SparkSession.scala b/sql/core/src/main/scala/org/apache/spark/sql/SparkSession.scala
index 27ae10b3d59..971cf9194d0 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/SparkSession.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/SparkSession.scala
@@ -621,8 +621,8 @@ class SparkSession private(
    *             <a href="https://spark.apache.org/docs/latest/sql-ref-datatypes.html">
    *             Supported Data Types</a> for supported value types in Scala/Java.
    *             For example, 1, "Steven", LocalDate.of(2023, 4, 2).
-   *             A value can be also a `Column` of literal expression, in that case
-   *             it is taken as is.
+   *             A value can be also a `Column` of a literal or collection constructor functions
+   *             such as `map()`, `array()`, `struct()`, in that case it is taken as is.
    * @param tracker A tracker that can notify when query is ready for execution
    */
   private[sql] def sql(sqlText: String, args: Array[_], tracker: QueryPlanningTracker): DataFrame =
@@ -649,8 +649,8 @@ class SparkSession private(
    *             <a href="https://spark.apache.org/docs/latest/sql-ref-datatypes.html">
    *             Supported Data Types</a> for supported value types in Scala/Java.
    *             For example, 1, "Steven", LocalDate.of(2023, 4, 2).
-   *             A value can be also a `Column` of literal expression, in that case
-   *             it is taken as is.
+   *             A value can be also a `Column` of a literal or collection constructor functions
+   *             such as `map()`, `array()`, `struct()`, in that case it is taken as is.
    *
    * @since 3.5.0
    */
@@ -671,8 +671,8 @@ class SparkSession private(
    *             Supported Data Types</a> for supported value types in Scala/Java.
    *             For example, map keys: "rank", "name", "birthdate";
    *             map values: 1, "Steven", LocalDate.of(2023, 4, 2).
-   *             Map value can be also a `Column` of literal expression, in that case
-   *             it is taken as is.
+   *             Map value can be also a `Column` of a literal or collection constructor functions
+   *             such as `map()`, `array()`, `struct()`, in that case it is taken as is.
    * @param tracker A tracker that can notify when query is ready for execution
    */
   private[sql] def sql(
@@ -703,8 +703,8 @@ class SparkSession private(
    *             Supported Data Types</a> for supported value types in Scala/Java.
    *             For example, map keys: "rank", "name", "birthdate";
    *             map values: 1, "Steven", LocalDate.of(2023, 4, 2).
-   *             Map value can be also a `Column` of literal expression, in that case
-   *             it is taken as is.
+   *             Map value can be also a `Column` of a literal or collection constructor functions
+   *             such as `map()`, `array()`, `struct()`, in that case it is taken as is.
    *
    * @since 3.4.0
    */
@@ -725,8 +725,8 @@ class SparkSession private(
    *             Supported Data Types</a> for supported value types in Scala/Java.
    *             For example, map keys: "rank", "name", "birthdate";
    *             map values: 1, "Steven", LocalDate.of(2023, 4, 2).
-   *             Map value can be also a `Column` of literal expression, in that case
-   *             it is taken as is.
+   *             Map value can be also a `Column` of a literal or collection constructor functions
+   *             such as `map()`, `array()`, `struct()`, in that case it is taken as is.
    *
    * @since 3.4.0
    */
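
For context, a short hypothetical sketch of the unbound-parameter case documented above (not part of the commit; assumes an active SparkSession `spark`):
```
// ":limit" is referenced in the query text but missing from `args`,
// so analysis fails with the UNBOUND_SQL_PARAMETER error shown above.
spark.sql("SELECT * FROM range(10) WHERE id < :limit", Map.empty[String, Any])
```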


---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscr...@spark.apache.org
For additional commands, e-mail: commits-h...@spark.apache.org
