This is an automated email from the ASF dual-hosted git repository.

maxgekk pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/spark.git


The following commit(s) were added to refs/heads/master by this push:
     new 3baf7f7b710 [SPARK-44244][SQL] Assign names to the error class _LEGACY_ERROR_TEMP_[2305-2309]
3baf7f7b710 is described below

commit 3baf7f7b7106f3fd30257b793ff4908d0f1ec427
Author: Jiaan Geng <belie...@163.com>
AuthorDate: Sat Jul 1 12:03:42 2023 +0300

    [SPARK-44244][SQL] Assign names to the error class _LEGACY_ERROR_TEMP_[2305-2309]
    
    ### What changes were proposed in this pull request?
    This PR aims to assign names to the error classes _LEGACY_ERROR_TEMP_[2305-2309].
    
    ### Why are the changes needed?
    Improve the error framework.
    
    ### Does this PR introduce _any_ user-facing change?
    'No'.
    
    ### How was this patch tested?
    Updated existing test cases and added new ones.
    
    Closes #41788 from beliefer/SPARK-44244.
    
    Authored-by: Jiaan Geng <belie...@163.com>
    Signed-off-by: Max Gekk <max.g...@gmail.com>
---
 .../src/main/resources/error/error-classes.json    | 35 ++++++++++------------
 .../spark/sql/catalyst/analysis/Analyzer.scala     | 14 ++++-----
 .../catalyst/analysis/ResolveInlineTables.scala    | 10 +++----
 .../sql/catalyst/analysis/AnalysisSuite.scala      |  6 ++--
 .../ansi/higher-order-functions.sql.out            |  2 +-
 .../higher-order-functions.sql.out                 |  2 +-
 .../analyzer-results/inline-table.sql.out          | 16 +++++-----
 .../table-valued-functions.sql.out                 | 20 ++++++-------
 .../analyzer-results/udf/udf-inline-table.sql.out  | 16 +++++-----
 .../results/ansi/higher-order-functions.sql.out    |  2 +-
 .../results/higher-order-functions.sql.out         |  2 +-
 .../sql-tests/results/inline-table.sql.out         | 16 +++++-----
 .../results/table-valued-functions.sql.out         | 20 ++++++-------
 .../sql-tests/results/udf/udf-inline-table.sql.out | 16 +++++-----
 .../spark/sql/connector/DataSourceV2SQLSuite.scala | 13 ++++----
 .../execution/command/PlanResolutionSuite.scala    | 19 ++++++++----
 16 files changed, 105 insertions(+), 104 deletions(-)
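
For illustration, a minimal sketch (assuming a running Spark session named spark and ScalaTest's intercept) of how one of the renamed error classes now surfaces; the failing query is taken verbatim from the inline-table tests below:

    // Hypothetical snippet: the second row has 1 column where 2 are expected,
    // so analysis fails with the newly named error class instead of
    // _LEGACY_ERROR_TEMP_2305.
    import org.apache.spark.sql.AnalysisException
    import org.scalatest.Assertions.intercept

    val e = intercept[AnalysisException] {
      spark.sql("""select * from values ("one", 2.0), ("two") as data(a, b)""")
    }
    assert(e.getErrorClass == "INVALID_INLINE_TABLE.NUM_COLUMNS_MISMATCH")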

diff --git a/common/utils/src/main/resources/error/error-classes.json b/common/utils/src/main/resources/error/error-classes.json
index 14bd3bc6bac..027d09eae10 100644
--- a/common/utils/src/main/resources/error/error-classes.json
+++ b/common/utils/src/main/resources/error/error-classes.json
@@ -1241,6 +1241,11 @@
         "message" : [
           "Found incompatible types in the column <colName> for inline table."
         ]
+      },
+      "NUM_COLUMNS_MISMATCH" : {
+        "message" : [
+          "Inline table expected <expectedNumCols> columns but found 
<actualNumCols> columns in row <rowIndex>."
+        ]
       }
     }
   },
@@ -1266,6 +1271,11 @@
           "The lambda function has duplicate arguments <args>. Please, 
consider to rename the argument names or set <caseSensitiveConfig> to \"true\"."
         ]
       },
+      "NON_HIGHER_ORDER_FUNCTION" : {
+        "message" : [
+          "A lambda function should only be used in a higher order function. 
However, its class is <class>, which is not a higher order function."
+        ]
+      },
       "NUM_ARGS_MISMATCH" : {
         "message" : [
           "A higher order function expects <expectedNumArgs> arguments, but 
got <actualNumArgs>."
@@ -1939,6 +1949,11 @@
     ],
     "sqlState" : "42826"
   },
+  "NUM_TABLE_VALUE_ALIASES_MISMATCH" : {
+    "message" : [
+      "Number of given aliases does not match number of output columns. 
Function name: <funcName>; number of aliases: <aliasesNum>; number of output 
columns: <outColsNum>."
+    ]
+  },
   "ORDER_BY_POS_OUT_OF_RANGE" : {
     "message" : [
       "ORDER BY position <index> is not in select list (valid range is [1, 
<size>])."
@@ -5589,26 +5604,6 @@
       "The input <valueType> '<input>' does not match the given number format: 
'<format>'."
     ]
   },
-  "_LEGACY_ERROR_TEMP_2305" : {
-    "message" : [
-      "expected <numCols> columns but found <rowSize> columns in row <ri>."
-    ]
-  },
-  "_LEGACY_ERROR_TEMP_2306" : {
-    "message" : [
-      "A lambda function should only be used in a higher order function. 
However, its class is <class>, which is not a higher order function."
-    ]
-  },
-  "_LEGACY_ERROR_TEMP_2307" : {
-    "message" : [
-      "Number of given aliases does not match number of output columns. 
Function name: <funcName>; number of aliases: <aliasesNum>; number of output 
columns: <outColsNum>."
-    ]
-  },
-  "_LEGACY_ERROR_TEMP_2309" : {
-    "message" : [
-      "cannot resolve <sqlExpr> in MERGE command given columns [<cols>]."
-    ]
-  },
   "_LEGACY_ERROR_TEMP_2311" : {
     "message" : [
       "'writeTo' can not be called on streaming Dataset/DataFrame."
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/Analyzer.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/Analyzer.scala
index b61dbae686b..47c266e7d18 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/Analyzer.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/Analyzer.scala
@@ -1787,12 +1787,12 @@ class Analyzer(override val catalogManager: CatalogManager) extends RuleExecutor
       e.references.filter(!_.resolved).foreach { a =>
         // Note: This will throw error only on unresolved attribute issues,
         // not other resolution errors like mismatched data types.
-        val cols = p.inputSet.toSeq.map(_.sql).mkString(", ")
+        val cols = p.inputSet.toSeq.map(attr => toSQLId(attr.name)).mkString(", ")
         a.failAnalysis(
-          errorClass = "_LEGACY_ERROR_TEMP_2309",
+          errorClass = "UNRESOLVED_COLUMN.WITH_SUGGESTION",
           messageParameters = Map(
-            "sqlExpr" -> a.sql,
-            "cols" -> cols))
+            "objectName" -> toSQLId(a.name),
+            "proposal" -> cols))
       }
     }
 
@@ -2083,9 +2083,9 @@ class Analyzer(override val catalogManager: CatalogManager) extends RuleExecutor
         // Checks if the number of the aliases is equal to expected one
         if (u.outputNames.size != outputAttrs.size) {
           u.failAnalysis(
-            errorClass = "_LEGACY_ERROR_TEMP_2307",
+            errorClass = "NUM_TABLE_VALUE_ALIASES_MISMATCH",
             messageParameters = Map(
-              "funcName" -> u.name.quoted,
+              "funcName" -> toSQLId(u.name),
               "aliasesNum" -> u.outputNames.size.toString,
               "outColsNum" -> outputAttrs.size.toString))
         }
@@ -2103,7 +2103,7 @@ class Analyzer(override val catalogManager: CatalogManager) extends RuleExecutor
             resolveBuiltinOrTempFunction(nameParts, arguments, Some(u)).map {
               case func: HigherOrderFunction => func
               case other => other.failAnalysis(
-                errorClass = "_LEGACY_ERROR_TEMP_2306",
+                errorClass = "INVALID_LAMBDA_FUNCTION_CALL.NON_HIGHER_ORDER_FUNCTION",
                 messageParameters = Map(
                   "class" -> other.getClass.getCanonicalName))
             }.getOrElse {
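
Besides the renames, the hunks above switch identifier rendering to toSQLId, which backtick-quotes each name part. Illustrative values only (they match the updated golden files below); the exact overloads live in catalyst's TypeUtils:

    toSQLId("range")                // "`range`"
    toSQLId(Seq("target", "dummy")) // "`target`.`dummy`"
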
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/ResolveInlineTables.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/ResolveInlineTables.scala
index 934b3bde6b5..2be1c1b7b08 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/ResolveInlineTables.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/ResolveInlineTables.scala
@@ -50,14 +50,14 @@ object ResolveInlineTables extends Rule[LogicalPlan] with CastSupport with Alias
   private[analysis] def validateInputDimension(table: UnresolvedInlineTable): Unit = {
     if (table.rows.nonEmpty) {
       val numCols = table.names.size
-      table.rows.zipWithIndex.foreach { case (row, ri) =>
+      table.rows.zipWithIndex.foreach { case (row, rowIndex) =>
         if (row.size != numCols) {
           table.failAnalysis(
-            errorClass = "_LEGACY_ERROR_TEMP_2305",
+            errorClass = "INVALID_INLINE_TABLE.NUM_COLUMNS_MISMATCH",
             messageParameters = Map(
-              "numCols" -> numCols.toString,
-              "rowSize" -> row.size.toString,
-              "ri" -> ri.toString))
+              "expectedNumCols" -> numCols.toString,
+              "actualNumCols" -> row.size.toString,
+              "rowIndex" -> rowIndex.toString))
         }
       }
     }
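
The validation logic itself is unchanged; only the error class and parameter names are clearer. A self-contained sketch of the check, using plain Scala collections in place of catalyst expressions:

    // Every row of an inline table must have as many expressions as there are
    // column names; otherwise analysis fails with NUM_COLUMNS_MISMATCH.
    val names = Seq("a", "b")
    val rows = Seq(Seq("one", "2.0"), Seq("two"))
    rows.zipWithIndex.foreach { case (row, rowIndex) =>
      if (row.size != names.size) {
        println(s"Inline table expected ${names.size} columns but found " +
          s"${row.size} columns in row $rowIndex.")
      }
    }
    // prints: Inline table expected 2 columns but found 1 columns in row 1.
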
diff --git a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/analysis/AnalysisSuite.scala b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/analysis/AnalysisSuite.scala
index dae42453f0d..55005d87cdc 100644
--- a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/analysis/AnalysisSuite.scala
+++ b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/analysis/AnalysisSuite.scala
@@ -543,10 +543,10 @@ class AnalysisSuite extends AnalysisTest with Matchers {
     assertAnalysisSuccess(rangeWithAliases(3 :: Nil, "a" :: Nil))
     assertAnalysisSuccess(rangeWithAliases(1 :: 4 :: Nil, "b" :: Nil))
     assertAnalysisSuccess(rangeWithAliases(2 :: 6 :: 2 :: Nil, "c" :: Nil))
-    assertAnalysisError(
+    assertAnalysisErrorClass(
       rangeWithAliases(3 :: Nil, "a" :: "b" :: Nil),
-      Seq("Number of given aliases does not match number of output columns. "
-        + "Function name: range; number of aliases: 2; number of output 
columns: 1."))
+      "NUM_TABLE_VALUE_ALIASES_MISMATCH",
+      Map("funcName" -> "`range`", "aliasesNum" -> "2", "outColsNum" -> "1"))
   }
 
   test("SPARK-20841 Support table column aliases in FROM clause") {
diff --git a/sql/core/src/test/resources/sql-tests/analyzer-results/ansi/higher-order-functions.sql.out b/sql/core/src/test/resources/sql-tests/analyzer-results/ansi/higher-order-functions.sql.out
index ac107e3e8c3..08d3be615b3 100644
--- a/sql/core/src/test/resources/sql-tests/analyzer-results/ansi/higher-order-functions.sql.out
+++ b/sql/core/src/test/resources/sql-tests/analyzer-results/ansi/higher-order-functions.sql.out
@@ -20,7 +20,7 @@ select upper(x -> x) as v
 -- !query analysis
 org.apache.spark.sql.AnalysisException
 {
-  "errorClass" : "_LEGACY_ERROR_TEMP_2306",
+  "errorClass" : "INVALID_LAMBDA_FUNCTION_CALL.NON_HIGHER_ORDER_FUNCTION",
   "messageParameters" : {
     "class" : "org.apache.spark.sql.catalyst.expressions.Upper"
   },
diff --git a/sql/core/src/test/resources/sql-tests/analyzer-results/higher-order-functions.sql.out b/sql/core/src/test/resources/sql-tests/analyzer-results/higher-order-functions.sql.out
index de90751777c..f656716a843 100644
--- a/sql/core/src/test/resources/sql-tests/analyzer-results/higher-order-functions.sql.out
+++ b/sql/core/src/test/resources/sql-tests/analyzer-results/higher-order-functions.sql.out
@@ -20,7 +20,7 @@ select upper(x -> x) as v
 -- !query analysis
 org.apache.spark.sql.AnalysisException
 {
-  "errorClass" : "_LEGACY_ERROR_TEMP_2306",
+  "errorClass" : "INVALID_LAMBDA_FUNCTION_CALL.NON_HIGHER_ORDER_FUNCTION",
   "messageParameters" : {
     "class" : "org.apache.spark.sql.catalyst.expressions.Upper"
   },
diff --git a/sql/core/src/test/resources/sql-tests/analyzer-results/inline-table.sql.out b/sql/core/src/test/resources/sql-tests/analyzer-results/inline-table.sql.out
index c473c392f07..f3e6eb4d8dc 100644
--- a/sql/core/src/test/resources/sql-tests/analyzer-results/inline-table.sql.out
+++ b/sql/core/src/test/resources/sql-tests/analyzer-results/inline-table.sql.out
@@ -118,11 +118,11 @@ select * from values ("one", 2.0), ("two") as data(a, b)
 -- !query analysis
 org.apache.spark.sql.AnalysisException
 {
-  "errorClass" : "_LEGACY_ERROR_TEMP_2305",
+  "errorClass" : "INVALID_INLINE_TABLE.NUM_COLUMNS_MISMATCH",
   "messageParameters" : {
-    "numCols" : "2",
-    "ri" : "1",
-    "rowSize" : "1"
+    "actualNumCols" : "1",
+    "expectedNumCols" : "2",
+    "rowIndex" : "1"
   },
   "queryContext" : [ {
     "objectType" : "",
@@ -158,11 +158,11 @@ select * from values ("one"), ("two") as data(a, b)
 -- !query analysis
 org.apache.spark.sql.AnalysisException
 {
-  "errorClass" : "_LEGACY_ERROR_TEMP_2305",
+  "errorClass" : "INVALID_INLINE_TABLE.NUM_COLUMNS_MISMATCH",
   "messageParameters" : {
-    "numCols" : "2",
-    "ri" : "0",
-    "rowSize" : "1"
+    "actualNumCols" : "1",
+    "expectedNumCols" : "2",
+    "rowIndex" : "0"
   },
   "queryContext" : [ {
     "objectType" : "",
diff --git a/sql/core/src/test/resources/sql-tests/analyzer-results/table-valued-functions.sql.out b/sql/core/src/test/resources/sql-tests/analyzer-results/table-valued-functions.sql.out
index d9e78315d92..49ad4bf19f7 100644
--- a/sql/core/src/test/resources/sql-tests/analyzer-results/table-valued-functions.sql.out
+++ b/sql/core/src/test/resources/sql-tests/analyzer-results/table-valued-functions.sql.out
@@ -325,10 +325,10 @@ select * from explode(array(1, 2)) t(c1, c2)
 -- !query analysis
 org.apache.spark.sql.AnalysisException
 {
-  "errorClass" : "_LEGACY_ERROR_TEMP_2307",
+  "errorClass" : "NUM_TABLE_VALUE_ALIASES_MISMATCH",
   "messageParameters" : {
     "aliasesNum" : "2",
-    "funcName" : "explode",
+    "funcName" : "`explode`",
     "outColsNum" : "1"
   },
   "queryContext" : [ {
@@ -448,10 +448,10 @@ select * from inline(array(struct(1, 2), struct(2, 3))) t(a, b, c)
 -- !query analysis
 org.apache.spark.sql.AnalysisException
 {
-  "errorClass" : "_LEGACY_ERROR_TEMP_2307",
+  "errorClass" : "NUM_TABLE_VALUE_ALIASES_MISMATCH",
   "messageParameters" : {
     "aliasesNum" : "3",
-    "funcName" : "inline",
+    "funcName" : "`inline`",
     "outColsNum" : "2"
   },
   "queryContext" : [ {
@@ -604,10 +604,10 @@ select * from posexplode(array(1, 2)) t(x)
 -- !query analysis
 org.apache.spark.sql.AnalysisException
 {
-  "errorClass" : "_LEGACY_ERROR_TEMP_2307",
+  "errorClass" : "NUM_TABLE_VALUE_ALIASES_MISMATCH",
   "messageParameters" : {
     "aliasesNum" : "1",
-    "funcName" : "posexplode",
+    "funcName" : "`posexplode`",
     "outColsNum" : "2"
   },
   "queryContext" : [ {
@@ -773,10 +773,10 @@ select * from json_tuple('{"a": 1, "b": 2}', 'a', 'b') AS t(x)
 -- !query analysis
 org.apache.spark.sql.AnalysisException
 {
-  "errorClass" : "_LEGACY_ERROR_TEMP_2307",
+  "errorClass" : "NUM_TABLE_VALUE_ALIASES_MISMATCH",
   "messageParameters" : {
     "aliasesNum" : "1",
-    "funcName" : "json_tuple",
+    "funcName" : "`json_tuple`",
     "outColsNum" : "2"
   },
   "queryContext" : [ {
@@ -866,10 +866,10 @@ select * from stack(2, 1, 2, 3) t(a, b, c)
 -- !query analysis
 org.apache.spark.sql.AnalysisException
 {
-  "errorClass" : "_LEGACY_ERROR_TEMP_2307",
+  "errorClass" : "NUM_TABLE_VALUE_ALIASES_MISMATCH",
   "messageParameters" : {
     "aliasesNum" : "3",
-    "funcName" : "stack",
+    "funcName" : "`stack`",
     "outColsNum" : "2"
   },
   "queryContext" : [ {
diff --git a/sql/core/src/test/resources/sql-tests/analyzer-results/udf/udf-inline-table.sql.out b/sql/core/src/test/resources/sql-tests/analyzer-results/udf/udf-inline-table.sql.out
index 7a6685fc9fe..a9f78a430f6 100644
--- a/sql/core/src/test/resources/sql-tests/analyzer-results/udf/udf-inline-table.sql.out
+++ b/sql/core/src/test/resources/sql-tests/analyzer-results/udf/udf-inline-table.sql.out
@@ -102,11 +102,11 @@ select udf(a), udf(b) from values ("one", 2.0), ("two") as data(a, b)
 -- !query analysis
 org.apache.spark.sql.AnalysisException
 {
-  "errorClass" : "_LEGACY_ERROR_TEMP_2305",
+  "errorClass" : "INVALID_INLINE_TABLE.NUM_COLUMNS_MISMATCH",
   "messageParameters" : {
-    "numCols" : "2",
-    "ri" : "1",
-    "rowSize" : "1"
+    "actualNumCols" : "1",
+    "expectedNumCols" : "2",
+    "rowIndex" : "1"
   },
   "queryContext" : [ {
     "objectType" : "",
@@ -142,11 +142,11 @@ select udf(a), udf(b) from values ("one"), ("two") as data(a, b)
 -- !query analysis
 org.apache.spark.sql.AnalysisException
 {
-  "errorClass" : "_LEGACY_ERROR_TEMP_2305",
+  "errorClass" : "INVALID_INLINE_TABLE.NUM_COLUMNS_MISMATCH",
   "messageParameters" : {
-    "numCols" : "2",
-    "ri" : "0",
-    "rowSize" : "1"
+    "actualNumCols" : "1",
+    "expectedNumCols" : "2",
+    "rowIndex" : "0"
   },
   "queryContext" : [ {
     "objectType" : "",
diff --git a/sql/core/src/test/resources/sql-tests/results/ansi/higher-order-functions.sql.out b/sql/core/src/test/resources/sql-tests/results/ansi/higher-order-functions.sql.out
index a9ce15d9821..e479b49463e 100644
--- a/sql/core/src/test/resources/sql-tests/results/ansi/higher-order-functions.sql.out
+++ b/sql/core/src/test/resources/sql-tests/results/ansi/higher-order-functions.sql.out
@@ -18,7 +18,7 @@ struct<>
 -- !query output
 org.apache.spark.sql.AnalysisException
 {
-  "errorClass" : "_LEGACY_ERROR_TEMP_2306",
+  "errorClass" : "INVALID_LAMBDA_FUNCTION_CALL.NON_HIGHER_ORDER_FUNCTION",
   "messageParameters" : {
     "class" : "org.apache.spark.sql.catalyst.expressions.Upper"
   },
diff --git a/sql/core/src/test/resources/sql-tests/results/higher-order-functions.sql.out b/sql/core/src/test/resources/sql-tests/results/higher-order-functions.sql.out
index a9ce15d9821..e479b49463e 100644
--- a/sql/core/src/test/resources/sql-tests/results/higher-order-functions.sql.out
+++ b/sql/core/src/test/resources/sql-tests/results/higher-order-functions.sql.out
@@ -18,7 +18,7 @@ struct<>
 -- !query output
 org.apache.spark.sql.AnalysisException
 {
-  "errorClass" : "_LEGACY_ERROR_TEMP_2306",
+  "errorClass" : "INVALID_LAMBDA_FUNCTION_CALL.NON_HIGHER_ORDER_FUNCTION",
   "messageParameters" : {
     "class" : "org.apache.spark.sql.catalyst.expressions.Upper"
   },
diff --git a/sql/core/src/test/resources/sql-tests/results/inline-table.sql.out b/sql/core/src/test/resources/sql-tests/results/inline-table.sql.out
index 2d2e6b28360..e735dd0dd5a 100644
--- a/sql/core/src/test/resources/sql-tests/results/inline-table.sql.out
+++ b/sql/core/src/test/resources/sql-tests/results/inline-table.sql.out
@@ -132,11 +132,11 @@ struct<>
 -- !query output
 org.apache.spark.sql.AnalysisException
 {
-  "errorClass" : "_LEGACY_ERROR_TEMP_2305",
+  "errorClass" : "INVALID_INLINE_TABLE.NUM_COLUMNS_MISMATCH",
   "messageParameters" : {
-    "numCols" : "2",
-    "ri" : "1",
-    "rowSize" : "1"
+    "actualNumCols" : "1",
+    "expectedNumCols" : "2",
+    "rowIndex" : "1"
   },
   "queryContext" : [ {
     "objectType" : "",
@@ -176,11 +176,11 @@ struct<>
 -- !query output
 org.apache.spark.sql.AnalysisException
 {
-  "errorClass" : "_LEGACY_ERROR_TEMP_2305",
+  "errorClass" : "INVALID_INLINE_TABLE.NUM_COLUMNS_MISMATCH",
   "messageParameters" : {
-    "numCols" : "2",
-    "ri" : "0",
-    "rowSize" : "1"
+    "actualNumCols" : "1",
+    "expectedNumCols" : "2",
+    "rowIndex" : "0"
   },
   "queryContext" : [ {
     "objectType" : "",
diff --git a/sql/core/src/test/resources/sql-tests/results/table-valued-functions.sql.out b/sql/core/src/test/resources/sql-tests/results/table-valued-functions.sql.out
index 2703df66d2b..578461d164a 100644
--- a/sql/core/src/test/resources/sql-tests/results/table-valued-functions.sql.out
+++ b/sql/core/src/test/resources/sql-tests/results/table-valued-functions.sql.out
@@ -374,10 +374,10 @@ struct<>
 -- !query output
 org.apache.spark.sql.AnalysisException
 {
-  "errorClass" : "_LEGACY_ERROR_TEMP_2307",
+  "errorClass" : "NUM_TABLE_VALUE_ALIASES_MISMATCH",
   "messageParameters" : {
     "aliasesNum" : "2",
-    "funcName" : "explode",
+    "funcName" : "`explode`",
     "outColsNum" : "1"
   },
   "queryContext" : [ {
@@ -503,10 +503,10 @@ struct<>
 -- !query output
 org.apache.spark.sql.AnalysisException
 {
-  "errorClass" : "_LEGACY_ERROR_TEMP_2307",
+  "errorClass" : "NUM_TABLE_VALUE_ALIASES_MISMATCH",
   "messageParameters" : {
     "aliasesNum" : "3",
-    "funcName" : "inline",
+    "funcName" : "`inline`",
     "outColsNum" : "2"
   },
   "queryContext" : [ {
@@ -668,10 +668,10 @@ struct<>
 -- !query output
 org.apache.spark.sql.AnalysisException
 {
-  "errorClass" : "_LEGACY_ERROR_TEMP_2307",
+  "errorClass" : "NUM_TABLE_VALUE_ALIASES_MISMATCH",
   "messageParameters" : {
     "aliasesNum" : "1",
-    "funcName" : "posexplode",
+    "funcName" : "`posexplode`",
     "outColsNum" : "2"
   },
   "queryContext" : [ {
@@ -847,10 +847,10 @@ struct<>
 -- !query output
 org.apache.spark.sql.AnalysisException
 {
-  "errorClass" : "_LEGACY_ERROR_TEMP_2307",
+  "errorClass" : "NUM_TABLE_VALUE_ALIASES_MISMATCH",
   "messageParameters" : {
     "aliasesNum" : "1",
-    "funcName" : "json_tuple",
+    "funcName" : "`json_tuple`",
     "outColsNum" : "2"
   },
   "queryContext" : [ {
@@ -944,10 +944,10 @@ struct<>
 -- !query output
 org.apache.spark.sql.AnalysisException
 {
-  "errorClass" : "_LEGACY_ERROR_TEMP_2307",
+  "errorClass" : "NUM_TABLE_VALUE_ALIASES_MISMATCH",
   "messageParameters" : {
     "aliasesNum" : "3",
-    "funcName" : "stack",
+    "funcName" : "`stack`",
     "outColsNum" : "2"
   },
   "queryContext" : [ {
diff --git a/sql/core/src/test/resources/sql-tests/results/udf/udf-inline-table.sql.out b/sql/core/src/test/resources/sql-tests/results/udf/udf-inline-table.sql.out
index 77de4beb79d..d12981c003e 100644
--- a/sql/core/src/test/resources/sql-tests/results/udf/udf-inline-table.sql.out
+++ b/sql/core/src/test/resources/sql-tests/results/udf/udf-inline-table.sql.out
@@ -116,11 +116,11 @@ struct<>
 -- !query output
 org.apache.spark.sql.AnalysisException
 {
-  "errorClass" : "_LEGACY_ERROR_TEMP_2305",
+  "errorClass" : "INVALID_INLINE_TABLE.NUM_COLUMNS_MISMATCH",
   "messageParameters" : {
-    "numCols" : "2",
-    "ri" : "1",
-    "rowSize" : "1"
+    "actualNumCols" : "1",
+    "expectedNumCols" : "2",
+    "rowIndex" : "1"
   },
   "queryContext" : [ {
     "objectType" : "",
@@ -160,11 +160,11 @@ struct<>
 -- !query output
 org.apache.spark.sql.AnalysisException
 {
-  "errorClass" : "_LEGACY_ERROR_TEMP_2305",
+  "errorClass" : "INVALID_INLINE_TABLE.NUM_COLUMNS_MISMATCH",
   "messageParameters" : {
-    "numCols" : "2",
-    "ri" : "0",
-    "rowSize" : "1"
+    "actualNumCols" : "1",
+    "expectedNumCols" : "2",
+    "rowIndex" : "0"
   },
   "queryContext" : [ {
     "objectType" : "",
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/connector/DataSourceV2SQLSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/connector/DataSourceV2SQLSuite.scala
index bde731e195f..5d4b672afb4 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/connector/DataSourceV2SQLSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/connector/DataSourceV2SQLSuite.scala
@@ -2161,10 +2161,10 @@ class DataSourceV2SQLSuiteV1Filter
            |THEN INSERT *""".stripMargin
       checkError(
         exception = analysisException(sql1),
-        errorClass = "_LEGACY_ERROR_TEMP_2309",
+        errorClass = "UNRESOLVED_COLUMN.WITH_SUGGESTION",
         parameters = Map(
-          "sqlExpr" -> "target.dummy",
-          "cols" -> "target.age, target.id, target.name, target.p"),
+          "objectName" -> "`target`.`dummy`",
+          "proposal" -> "`age`, `id`, `name`, `p`"),
         context = ExpectedContext("target.dummy = source.age", 206, 230))
 
       // UPDATE using non-existing column
@@ -2177,11 +2177,10 @@ class DataSourceV2SQLSuiteV1Filter
             |WHEN MATCHED AND (target.age > 10) THEN UPDATE SET target.age = source.dummy
              |WHEN NOT MATCHED AND (target.col2='insert')
              |THEN INSERT *""".stripMargin),
-        errorClass = "_LEGACY_ERROR_TEMP_2309",
+        errorClass = "UNRESOLVED_COLUMN.WITH_SUGGESTION",
         parameters = Map(
-          "sqlExpr" -> "source.dummy",
-          "cols" -> ("target.age, source.age, target.id, source.id, " +
-            "target.name, source.name, target.p, source.p")),
+          "objectName" -> "`source`.`dummy`",
+          "proposal" -> "`age`, `age`, `id`, `id`, `name`, `name`, `p`, `p`"),
         context = ExpectedContext("source.dummy", 219, 230))
 
       // MERGE INTO is not implemented yet.
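
The MERGE path now reports unresolved references through the generic UNRESOLVED_COLUMN.WITH_SUGGESTION class. A hypothetical reproduction (table and column names invented for illustration):

    // Sketch only: the target table has no column named dummy, so resolving
    // the assignment key fails and the proposal lists the reachable columns.
    spark.sql(
      """MERGE INTO target USING source ON target.id = source.id
        |WHEN MATCHED THEN UPDATE SET target.dummy = source.age""".stripMargin)
    // => AnalysisException with errorClass UNRESOLVED_COLUMN.WITH_SUGGESTION,
    //    objectName `target`.`dummy`, proposal listing the target's columns.
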
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/command/PlanResolutionSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/command/PlanResolutionSuite.scala
index fc85ec40dbd..1f44344cb1a 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/execution/command/PlanResolutionSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/command/PlanResolutionSuite.scala
@@ -34,6 +34,7 @@ import org.apache.spark.sql.catalyst.expressions.objects.StaticInvoke
 import org.apache.spark.sql.catalyst.parser.{CatalystSqlParser, ParseException}
 import org.apache.spark.sql.catalyst.plans.logical.{AlterColumn, AnalysisOnlyCommand, AppendData, Assignment, CreateTable, CreateTableAsSelect, DeleteAction, DeleteFromTable, DescribeRelation, DropTable, InsertAction, InsertIntoStatement, LocalRelation, LogicalPlan, MergeIntoTable, OneRowRelation, OverwriteByExpression, OverwritePartitionsDynamic, Project, SetTableLocation, SetTableProperties, ShowTableProperties, SubqueryAlias, UnsetTableProperties, UpdateAction, UpdateTable}
 import org.apache.spark.sql.catalyst.rules.Rule
+import org.apache.spark.sql.catalyst.util.TypeUtils.toSQLId
 import org.apache.spark.sql.connector.FakeV2Provider
 import org.apache.spark.sql.connector.catalog.{CatalogManager, CatalogNotFoundException, Column, ColumnDefaultValue, Identifier, SupportsDelete, Table, TableCapability, TableCatalog, V1Table}
 import org.apache.spark.sql.connector.catalog.CatalogManager.SESSION_CATALOG_NAME
@@ -2177,8 +2178,14 @@ class PlanResolutionSuite extends AnalysisTest {
            |WHEN NOT MATCHED BY SOURCE THEN UPDATE SET $target.s = $source.s
          """.stripMargin
       // update value in not matched by source clause can only reference the target table.
-      val e7 = intercept[AnalysisException](parseAndResolve(sql7))
-      assert(e7.message.contains(s"cannot resolve $source.s in MERGE command"))
+      checkError(
+        exception = intercept[AnalysisException](parseAndResolve(sql7)),
+        errorClass = "UNRESOLVED_COLUMN.WITH_SUGGESTION",
+        parameters = Map("objectName" -> s"${toSQLId(source)}.`s`", "proposal" 
-> "`i`, `s`"),
+        context = ExpectedContext(
+          fragment = s"$source.s",
+          start = 77 + target.length * 2 + source.length,
+          stop = 78 + target.length * 2 + source.length * 2))
     }
 
     val sql1 =
@@ -2206,8 +2213,8 @@ class PlanResolutionSuite extends AnalysisTest {
          |WHEN MATCHED THEN UPDATE SET *""".stripMargin
     checkError(
       exception = intercept[AnalysisException](parseAndResolve(sql2)),
-      errorClass = "_LEGACY_ERROR_TEMP_2309",
-      parameters = Map("sqlExpr" -> "s", "cols" -> "testcat.tab2.i, 
testcat.tab2.x"),
+      errorClass = "UNRESOLVED_COLUMN.WITH_SUGGESTION",
+      parameters = Map("objectName" -> "`s`", "proposal" -> "`i`, `x`"),
       context = ExpectedContext(fragment = sql2, start = 0, stop = 80))
 
     // INSERT * with incompatible schema between source and target tables.
@@ -2218,8 +2225,8 @@ class PlanResolutionSuite extends AnalysisTest {
         |WHEN NOT MATCHED THEN INSERT *""".stripMargin
     checkError(
       exception = intercept[AnalysisException](parseAndResolve(sql3)),
-      errorClass = "_LEGACY_ERROR_TEMP_2309",
-      parameters = Map("sqlExpr" -> "s", "cols" -> "testcat.tab2.i, testcat.tab2.x"),
+      errorClass = "UNRESOLVED_COLUMN.WITH_SUGGESTION",
+      parameters = Map("objectName" -> "`s`", "proposal" -> "`i`, `x`"),
       context = ExpectedContext(fragment = sql3, start = 0, stop = 80))
 
     val sql4 =
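
The start/stop arithmetic in the checkError above locates the interpolated $source.s fragment inside sql7. A worked instance with hypothetical name lengths (both identifiers 12 characters, e.g. "testcat.tab1" and "testcat.tab2"):

    val start = 77 + 12 * 2 + 12     // 113: offset where "$source.s" expands
    val stop = 78 + 12 * 2 + 12 * 2  // 126: offset of its last character
                                     // span = 126 - 113 + 1 = 14 = "testcat.tab2.s".length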

