This is an automated email from the ASF dual-hosted git repository.

maxgekk pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/spark.git


The following commit(s) were added to refs/heads/master by this push:
     new 8aaa6aa41ef [SPARK-40978][SQL] Migrate `failAnalysis()` w/o a context onto error classes
8aaa6aa41ef is described below

commit 8aaa6aa41ef679817bd4bfe20927b83d3f5c5b54
Author: Max Gekk <max.g...@gmail.com>
AuthorDate: Tue Nov 1 16:24:41 2022 +0300

    [SPARK-40978][SQL] Migrate `failAnalysis()` w/o a context onto error classes
    
    ### What changes were proposed in this pull request?
    In this PR, I propose to migrate `failAnalysis()` errors without a context onto temporary error classes with the prefix `_LEGACY_ERROR_TEMP_24xx`. The error messages will not include the error class names, so the existing behaviour is preserved.
    
    ### Why are the changes needed?
    Migrating onto temporary error classes lets us gather statistics about errors and detect the most common error classes. After that, we can prioritise further migration work accordingly.
    
    The new error class name prefix `_LEGACY_ERROR_TEMP_` proposed here marks an error as developer-facing rather than user-facing. Developers can still get the error class programmatically via the `SparkThrowable` interface and build error infrastructure on top of it, but end users won't see the error class in the message. This lets us migrate errors very quickly, and we can refine the error classes and mark them as user-facing later (naming them properly, adding tests, etc.).
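    
    As a rough sketch of the developer-facing side (assuming a `SparkSession` named `spark` and a registered `testdata` table; `getErrorClass()` comes from the `SparkThrowable` interface implemented by `AnalysisException`):
    ```scala
    import org.apache.spark.SparkThrowable
    
    try {
      spark.sql("SELECT * FROM testdata LIMIT -1").collect()
    } catch {
      case e: SparkThrowable =>
        // Prints "_LEGACY_ERROR_TEMP_2403", even though the user-facing
        // message text does not mention the error class.
        println(e.getErrorClass)
    }
    ```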
    
    ### Does this PR introduce _any_ user-facing change?
    No. The error messages should be almost the same by default.
    
    ### How was this patch tested?
    By running the affected test suites:
    ```
    $ PYSPARK_PYTHON=python3 build/sbt "sql/testOnly 
org.apache.spark.sql.SQLQueryTestSuite"
    ```
    
    Closes #38454 from MaxGekk/legacy-error-class-failAnalysis-2.
    
    Authored-by: Max Gekk <max.g...@gmail.com>
    Signed-off-by: Max Gekk <max.g...@gmail.com>
---
 core/src/main/resources/error/error-classes.json   | 244 ++++++
 .../spark/sql/catalyst/analysis/Analyzer.scala     |   5 +-
 .../sql/catalyst/analysis/CheckAnalysis.scala      | 278 ++++---
 .../sql/catalyst/analysis/AnalysisErrorSuite.scala |   8 +-
 .../CreateTablePartitioningValidationSuite.scala   |   7 +-
 .../resources/sql-tests/results/except-all.sql.out |  22 +-
 .../sql-tests/results/group-analytics.sql.out      |   8 +-
 .../sql-tests/results/group-by-filter.sql.out      |   8 +-
 .../resources/sql-tests/results/group-by.sql.out   |  67 +-
 .../sql-tests/results/intersect-all.sql.out        |  22 +-
 .../test/resources/sql-tests/results/limit.sql.out |  48 +-
 .../sql-tests/results/percentiles.sql.out          |  84 ++-
 .../test/resources/sql-tests/results/pivot.sql.out |   4 +-
 .../results/postgreSQL/aggregates_part3.sql.out    |   4 +-
 .../sql-tests/results/postgreSQL/limit.sql.out     |  16 +-
 .../results/postgreSQL/select_having.sql.out       |   8 +-
 .../results/postgreSQL/window_part3.sql.out        |  22 +-
 .../negative-cases/invalid-correlation.sql.out     |  15 +-
 .../native/widenSetOperationTypes.sql.out          | 840 +++++++++++++++++++--
 .../results/udaf/udaf-group-by-ordinal.sql.out     |   8 +-
 .../sql-tests/results/udaf/udaf-group-by.sql.out   |   8 +-
 .../udf/postgreSQL/udf-aggregates_part3.sql.out    |   4 +-
 .../udf/postgreSQL/udf-select_having.sql.out       |   8 +-
 .../sql-tests/results/udf/udf-except-all.sql.out   |  22 +-
 .../results/udf/udf-group-analytics.sql.out        |   8 +-
 .../sql-tests/results/udf/udf-group-by.sql.out     |  56 +-
 .../results/udf/udf-intersect-all.sql.out          |  22 +-
 .../sql-tests/results/udf/udf-pivot.sql.out        |   4 +-
 28 files changed, 1576 insertions(+), 274 deletions(-)

diff --git a/core/src/main/resources/error/error-classes.json b/core/src/main/resources/error/error-classes.json
index 1617c88e9fe..5abd0bd9630 100644
--- a/core/src/main/resources/error/error-classes.json
+++ b/core/src/main/resources/error/error-classes.json
@@ -4504,5 +4504,249 @@
     "message" : [
       "<msg>"
     ]
+  },
+  "_LEGACY_ERROR_TEMP_2400" : {
+    "message" : [
+      "The <name> expression must evaluate to a constant value, but got 
<limitExpr>."
+    ]
+  },
+  "_LEGACY_ERROR_TEMP_2401" : {
+    "message" : [
+      "The <name> expression must be integer type, but got <dataType>."
+    ]
+  },
+  "_LEGACY_ERROR_TEMP_2402" : {
+    "message" : [
+      "The evaluated <name> expression must not be null, but got <limitExpr>."
+    ]
+  },
+  "_LEGACY_ERROR_TEMP_2403" : {
+    "message" : [
+      "The <name> expression must be equal to or greater than 0, but got <v>."
+    ]
+  },
+  "_LEGACY_ERROR_TEMP_2404" : {
+    "message" : [
+      "Table <name> is not partitioned."
+    ]
+  },
+  "_LEGACY_ERROR_TEMP_2405" : {
+    "message" : [
+      "Table <name> does not support partition management."
+    ]
+  },
+  "_LEGACY_ERROR_TEMP_2406" : {
+    "message" : [
+      "invalid cast from <srcType> to <targetType>."
+    ]
+  },
+  "_LEGACY_ERROR_TEMP_2407" : {
+    "message" : [
+      "grouping_id() can only be used with GroupingSets/Cube/Rollup"
+    ]
+  },
+  "_LEGACY_ERROR_TEMP_2408" : {
+    "message" : [
+      "Window function <w> requires an OVER clause."
+    ]
+  },
+  "_LEGACY_ERROR_TEMP_2409" : {
+    "message" : [
+      "Distinct window functions are not supported: <w>"
+    ]
+  },
+  "_LEGACY_ERROR_TEMP_2410" : {
+    "message" : [
+      "<wf> function can only be evaluated in an ordered row-based window 
frame with a single offset: <w>"
+    ]
+  },
+  "_LEGACY_ERROR_TEMP_2411" : {
+    "message" : [
+      "Cannot specify order by or frame for '<aggFunc>'."
+    ]
+  },
+  "_LEGACY_ERROR_TEMP_2412" : {
+    "message" : [
+      "Expression '<sqlExpr>' not supported within a window function."
+    ]
+  },
+  "_LEGACY_ERROR_TEMP_2413" : {
+    "message" : [
+      "Input argument to <argName> must be a constant."
+    ]
+  },
+  "_LEGACY_ERROR_TEMP_2414" : {
+    "message" : [
+      "Event time must be defined on a window or a timestamp, but <evName> is 
of type <evType>."
+    ]
+  },
+  "_LEGACY_ERROR_TEMP_2415" : {
+    "message" : [
+      "filter expression '<filter>' of type <type> is not a boolean."
+    ]
+  },
+  "_LEGACY_ERROR_TEMP_2416" : {
+    "message" : [
+      "join condition '<join>' of type <type> is not a boolean."
+    ]
+  },
+  "_LEGACY_ERROR_TEMP_2417" : {
+    "message" : [
+      "join condition '<condition>' of type <dataType> is not a boolean."
+    ]
+  },
+  "_LEGACY_ERROR_TEMP_2418" : {
+    "message" : [
+      "Input argument tolerance must be a constant."
+    ]
+  },
+  "_LEGACY_ERROR_TEMP_2419" : {
+    "message" : [
+      "Input argument tolerance must be non-negative."
+    ]
+  },
+  "_LEGACY_ERROR_TEMP_2420" : {
+    "message" : [
+      "It is not allowed to use an aggregate function in the argument of 
another aggregate function. Please use the inner aggregate function in a 
sub-query."
+    ]
+  },
+  "_LEGACY_ERROR_TEMP_2421" : {
+    "message" : [
+      "nondeterministic expression <sqlExpr> should not appear in the 
arguments of an aggregate function."
+    ]
+  },
+  "_LEGACY_ERROR_TEMP_2422" : {
+    "message" : [
+      "grouping expressions sequence is empty, and '<sqlExpr>' is not an 
aggregate function. Wrap '<aggExprs>' in windowing function(s) or wrap 
'<sqlExpr>' in first() (or first_value) if you don't care which value you get."
+    ]
+  },
+  "_LEGACY_ERROR_TEMP_2423" : {
+    "message" : [
+      "Correlated scalar subquery '<sqlExpr>' is neither present in the group 
by, nor in an aggregate function. Add it to group by using ordinal position or 
wrap it in first() (or first_value) if you don't care which value you get."
+    ]
+  },
+  "_LEGACY_ERROR_TEMP_2424" : {
+    "message" : [
+      "aggregate functions are not allowed in GROUP BY, but found <sqlExpr>"
+    ]
+  },
+  "_LEGACY_ERROR_TEMP_2425" : {
+    "message" : [
+      "expression <sqlExpr> cannot be used as a grouping expression because 
its data type <dataType> is not an orderable data type."
+    ]
+  },
+  "_LEGACY_ERROR_TEMP_2426" : {
+    "message" : [
+      "nondeterministic expression <sqlExpr> should not appear in grouping 
expression."
+    ]
+  },
+  "_LEGACY_ERROR_TEMP_2427" : {
+    "message" : [
+      "sorting is not supported for columns of type <type>"
+    ]
+  },
+  "_LEGACY_ERROR_TEMP_2428" : {
+    "message" : [
+      "The sum of the LIMIT clause and the OFFSET clause must not be greater 
than the maximum 32-bit integer value (2,147,483,647) but found limit = 
<limit>, offset = <offset>."
+    ]
+  },
+  "_LEGACY_ERROR_TEMP_2429" : {
+    "message" : [
+      "<operator> can only be performed on tables with the same number of 
columns, but the first table has <firstColNum> columns and the <nTab> table has 
<nColNum> columns."
+    ]
+  },
+  "_LEGACY_ERROR_TEMP_2430" : {
+    "message" : [
+      "<operator> can only be performed on tables with compatible column 
types. The <ci> column of the <ti> table is <dt1> type which is not compatible 
with <dt2> at the same column of the first table.<hint>"
+    ]
+  },
+  "_LEGACY_ERROR_TEMP_2431" : {
+    "message" : [
+      "Invalid partitioning: <cols> is missing or is in a map or array"
+    ]
+  },
+  "_LEGACY_ERROR_TEMP_2432" : {
+    "message" : [
+      "<msg>"
+    ]
+  },
+  "_LEGACY_ERROR_TEMP_2433" : {
+    "message" : [
+      "Only a single table generating function is allowed in a SELECT clause, 
found:",
+      "<sqlExprs>"
+    ]
+  },
+  "_LEGACY_ERROR_TEMP_2434" : {
+    "message" : [
+      "Failure when resolving conflicting references in Join:",
+      "<plan>",
+      "Conflicting attributes: <conflictingAttributes>"
+    ]
+  },
+  "_LEGACY_ERROR_TEMP_2435" : {
+    "message" : [
+      "Failure when resolving conflicting references in Intersect:",
+      "<plan>",
+      "Conflicting attributes: <conflictingAttributes>"
+    ]
+  },
+  "_LEGACY_ERROR_TEMP_2436" : {
+    "message" : [
+      "Failure when resolving conflicting references in Except:",
+      "<plan>",
+      "Conflicting attributes: <conflictingAttributes>"
+    ]
+  },
+  "_LEGACY_ERROR_TEMP_2437" : {
+    "message" : [
+      "Failure when resolving conflicting references in AsOfJoin:",
+      "<plan>",
+      "Conflicting attributes: <conflictingAttributes>"
+    ]
+  },
+  "_LEGACY_ERROR_TEMP_2438" : {
+    "message" : [
+      "Cannot have map type columns in DataFrame which calls set 
operations(intersect, except, etc.), but the type of column <colName> is 
<dataType>."
+    ]
+  },
+  "_LEGACY_ERROR_TEMP_2439" : {
+    "message" : [
+      "nondeterministic expressions are only allowed in Project, Filter, 
Aggregate or Window, found:",
+      "<sqlExprs>",
+      "in operator <operator>"
+    ]
+  },
+  "_LEGACY_ERROR_TEMP_2440" : {
+    "message" : [
+      "Aggregate/Window/Generate expressions are not valid in where clause of 
the query.",
+      "Expression in where clause: [<condition>]",
+      "Invalid expressions: [<invalidExprSqls>]"
+    ]
+  },
+  "_LEGACY_ERROR_TEMP_2441" : {
+    "message" : [
+      "The query operator `<operator>` contains one or more unsupported 
expression types Aggregate, Window or Generate.",
+      "Invalid expressions: [<invalidExprSqls>]"
+    ]
+  },
+  "_LEGACY_ERROR_TEMP_2442" : {
+    "message" : [
+      "unresolved operator <operator>"
+    ]
+  },
+  "_LEGACY_ERROR_TEMP_2443" : {
+    "message" : [
+      "Multiple definitions of observed metrics named '<name>': <plan>"
+    ]
+  },
+  "_LEGACY_ERROR_TEMP_2444" : {
+    "message" : [
+      "Function '<funcName>' does not implement ScalarFunction or 
AggregateFunction"
+    ]
+  },
+  "_LEGACY_ERROR_TEMP_2445" : {
+    "message" : [
+      "grouping() can only be used with GroupingSets/Cube/Rollup"
+    ]
   }
 }
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/Analyzer.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/Analyzer.scala
index bb4f01819f0..eca304c9d1b 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/Analyzer.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/Analyzer.scala
@@ -2416,8 +2416,9 @@ class Analyzer(override val catalogManager: CatalogManager)
         case aggFunc: V2AggregateFunction[_, _] =>
           processV2AggregateFunction(aggFunc, arguments, u)
         case _ =>
-          failAnalysis(s"Function '${bound.name()}' does not implement 
ScalarFunction" +
-            s" or AggregateFunction")
+          failAnalysis(
+            errorClass = "_LEGACY_ERROR_TEMP_2444",
+            messageParameters = Map("funcName" -> bound.name()))
       }
     }
 
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/CheckAnalysis.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/CheckAnalysis.scala
index 6788abe3f34..0b688dc5f7c 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/CheckAnalysis.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/CheckAnalysis.scala
@@ -52,8 +52,14 @@ trait CheckAnalysis extends PredicateHelper with LookupCatalog {
 
   val DATA_TYPE_MISMATCH_ERROR = TreeNodeTag[Boolean]("dataTypeMismatchError")
 
-  protected def failAnalysis(msg: String): Nothing = {
-    throw new AnalysisException(msg)
+  /**
+   * Fails the analysis at the point where a specific tree node was parsed using a provided
+   * error class and message parameters.
+   */
+  def failAnalysis(errorClass: String, messageParameters: Map[String, String]): Nothing = {
+    throw new AnalysisException(
+      errorClass = errorClass,
+      messageParameters = messageParameters)
   }
 
   protected def containsMultipleGenerators(exprs: Seq[Expression]): Boolean = {
@@ -77,17 +83,27 @@ trait CheckAnalysis extends PredicateHelper with LookupCatalog {
   private def checkLimitLikeClause(name: String, limitExpr: Expression): Unit = {
     limitExpr match {
       case e if !e.foldable => failAnalysis(
-        s"The $name expression must evaluate to a constant value, but got " +
-          limitExpr.sql)
+        errorClass = "_LEGACY_ERROR_TEMP_2400",
+        messageParameters = Map(
+          "name" -> name,
+          "limitExpr" -> limitExpr.sql))
       case e if e.dataType != IntegerType => failAnalysis(
-        s"The $name expression must be integer type, but got " +
-          e.dataType.catalogString)
+        errorClass = "_LEGACY_ERROR_TEMP_2401",
+        messageParameters = Map(
+          "name" -> name,
+          "dataType" -> e.dataType.catalogString))
       case e =>
         e.eval() match {
           case null => failAnalysis(
-            s"The evaluated $name expression must not be null, but got 
${limitExpr.sql}")
+            errorClass = "_LEGACY_ERROR_TEMP_2402",
+            messageParameters = Map(
+              "name" -> name,
+              "limitExpr" -> limitExpr.sql))
           case v: Int if v < 0 => failAnalysis(
-            s"The $name expression must be equal to or greater than 0, but got 
$v")
+            errorClass = "_LEGACY_ERROR_TEMP_2403",
+            messageParameters = Map(
+              "name" -> name,
+              "v" -> v.toString))
           case _ => // OK
         }
     }
@@ -173,10 +189,14 @@ trait CheckAnalysis extends PredicateHelper with LookupCatalog {
           case r @ ResolvedTable(_, _, table, _) => table match {
             case t: SupportsPartitionManagement =>
               if (t.partitionSchema.isEmpty) {
-                failAnalysis(s"Table ${r.name} is not partitioned.")
+                failAnalysis(
+                  errorClass = "_LEGACY_ERROR_TEMP_2404",
+                  messageParameters = Map("name" -> r.name))
               }
             case _ =>
-              failAnalysis(s"Table ${r.name} does not support partition 
management.")
+              failAnalysis(
+                errorClass = "_LEGACY_ERROR_TEMP_2405",
+                messageParameters = Map("name" -> r.name))
           }
           case _ =>
         }
@@ -234,30 +254,40 @@ trait CheckAnalysis extends PredicateHelper with LookupCatalog {
             }
 
           case c: Cast if !c.resolved =>
-            failAnalysis(s"invalid cast from ${c.child.dataType.catalogString} 
to " +
-              c.dataType.catalogString)
+            failAnalysis(
+              errorClass = "_LEGACY_ERROR_TEMP_2406",
+              messageParameters = Map(
+                "srcType" -> c.child.dataType.catalogString,
+                "targetType" -> c.dataType.catalogString))
           case e: RuntimeReplaceable if !e.replacement.resolved =>
             throw new IllegalStateException("Illegal RuntimeReplaceable: " + e 
+
               "\nReplacement is unresolved: " + e.replacement)
 
           case g: Grouping =>
-            failAnalysis("grouping() can only be used with 
GroupingSets/Cube/Rollup")
+            failAnalysis(errorClass = "_LEGACY_ERROR_TEMP_2445", 
messageParameters = Map.empty)
           case g: GroupingID =>
-            failAnalysis("grouping_id() can only be used with 
GroupingSets/Cube/Rollup")
+            failAnalysis(errorClass = "_LEGACY_ERROR_TEMP_2407", 
messageParameters = Map.empty)
 
          case e: Expression if e.children.exists(_.isInstanceOf[WindowFunction]) &&
               !e.isInstanceOf[WindowExpression] && e.resolved =>
             val w = e.children.find(_.isInstanceOf[WindowFunction]).get
-            failAnalysis(s"Window function $w requires an OVER clause.")
+            failAnalysis(
+              errorClass = "_LEGACY_ERROR_TEMP_2408",
+              messageParameters = Map("w" -> w.toString))
 
          case w @ WindowExpression(AggregateExpression(_, _, true, _, _), _) =>
-            failAnalysis(s"Distinct window functions are not supported: $w")
+            failAnalysis(
+              errorClass = "_LEGACY_ERROR_TEMP_2409",
+              messageParameters = Map("w" -> w.toString))
 
           case w @ WindowExpression(wf: FrameLessOffsetWindowFunction,
             WindowSpecDefinition(_, order, frame: SpecifiedWindowFrame))
              if order.isEmpty || !frame.isOffset =>
-            failAnalysis(s"${wf.prettyName} function can only be evaluated in 
an ordered " +
-              s"row-based window frame with a single offset: $w")
+            failAnalysis(
+              errorClass = "_LEGACY_ERROR_TEMP_2410",
+              messageParameters = Map(
+                "wf" -> wf.prettyName,
+                "w" -> w.toString))
 
           case w: WindowExpression =>
            // Only allow window functions with an aggregate expression or an offset window
@@ -268,12 +298,16 @@ trait CheckAnalysis extends PredicateHelper with LookupCatalog {
                if w.windowSpec.orderSpec.nonEmpty || w.windowSpec.frameSpecification !=
                    SpecifiedWindowFrame(RowFrame, UnboundedPreceding, UnboundedFollowing) =>
                 failAnalysis(
-                  s"Cannot specify order by or frame for 
'${agg.aggregateFunction.prettyName}'.")
+                  errorClass = "_LEGACY_ERROR_TEMP_2411",
+                  messageParameters = Map(
+                    "aggFunc" -> agg.aggregateFunction.prettyName))
               case _: AggregateExpression | _: FrameLessOffsetWindowFunction |
                   _: AggregateWindowFunction => // OK
               case f: PythonUDF if PythonUDF.isWindowPandasUDF(f) => // OK
               case other =>
-                failAnalysis(s"Expression '$other' not supported within a 
window function.")
+                failAnalysis(
+                  errorClass = "_LEGACY_ERROR_TEMP_2412",
+                  messageParameters = Map("sqlExpr" -> other.toString))
             }
 
           case s: SubqueryExpression =>
@@ -281,7 +315,8 @@ trait CheckAnalysis extends PredicateHelper with LookupCatalog {
 
           case e: ExpressionWithRandomSeed if !e.seedExpression.foldable =>
             failAnalysis(
-              s"Input argument to ${e.prettyName} must be a constant.")
+              errorClass = "_LEGACY_ERROR_TEMP_2413",
+              messageParameters = Map("argName" -> e.prettyName))
 
           case _ =>
         })
@@ -294,31 +329,43 @@ trait CheckAnalysis extends PredicateHelper with LookupCatalog {
               case _: TimestampType =>
               case _ =>
                 failAnalysis(
-                  s"Event time must be defined on a window or a timestamp, but 
" +
-                  s"${etw.eventTime.name} is of type 
${etw.eventTime.dataType.catalogString}")
+                  errorClass = "_LEGACY_ERROR_TEMP_2414",
+                  messageParameters = Map(
+                    "evName" -> etw.eventTime.name,
+                    "evType" -> etw.eventTime.dataType.catalogString))
             }
           case f: Filter if f.condition.dataType != BooleanType =>
             failAnalysis(
-              s"filter expression '${f.condition.sql}' " +
-                s"of type ${f.condition.dataType.catalogString} is not a 
boolean.")
+              errorClass = "_LEGACY_ERROR_TEMP_2415",
+              messageParameters = Map(
+                "filter" -> f.condition.sql,
+                "type" -> f.condition.dataType.catalogString))
 
          case j @ Join(_, _, _, Some(condition), _) if condition.dataType != BooleanType =>
             failAnalysis(
-              s"join condition '${condition.sql}' " +
-                s"of type ${condition.dataType.catalogString} is not a 
boolean.")
+              errorClass = "_LEGACY_ERROR_TEMP_2416",
+              messageParameters = Map(
+                "join" -> condition.sql,
+                "type" -> condition.dataType.catalogString))
 
           case j @ AsOfJoin(_, _, _, Some(condition), _, _, _)
               if condition.dataType != BooleanType =>
             failAnalysis(
-              s"join condition '${condition.sql}' " +
-                s"of type ${condition.dataType.catalogString} is not a 
boolean.")
+              errorClass = "_LEGACY_ERROR_TEMP_2417",
+              messageParameters = Map(
+                "condition" -> condition.sql,
+                "dataType" -> condition.dataType.catalogString))
 
           case j @ AsOfJoin(_, _, _, _, _, _, Some(toleranceAssertion)) =>
             if (!toleranceAssertion.foldable) {
-              failAnalysis("Input argument tolerance must be a constant.")
+              failAnalysis(
+                errorClass = "_LEGACY_ERROR_TEMP_2418",
+                messageParameters = Map.empty)
             }
             if (!toleranceAssertion.eval().asInstanceOf[Boolean]) {
-              failAnalysis("Input argument tolerance must be non-negative.")
+              failAnalysis(
+                errorClass = "_LEGACY_ERROR_TEMP_2419",
+                messageParameters = Map.empty)
             }
 
           case Aggregate(groupingExprs, aggregateExprs, _) =>
@@ -332,16 +379,15 @@ trait CheckAnalysis extends PredicateHelper with LookupCatalog {
                   child.foreach {
                    case expr: Expression if AggregateExpression.isAggregate(expr) =>
                       failAnalysis(
-                        s"It is not allowed to use an aggregate function in 
the argument of " +
-                          s"another aggregate function. Please use the inner 
aggregate function " +
-                          s"in a sub-query.")
+                        errorClass = "_LEGACY_ERROR_TEMP_2420",
+                        messageParameters = Map.empty)
                     case other => // OK
                   }
 
                   if (!child.deterministic) {
                     failAnalysis(
-                      s"nondeterministic expression ${expr.sql} should not " +
-                        s"appear in the arguments of an aggregate function.")
+                      errorClass = "_LEGACY_ERROR_TEMP_2421",
+                      messageParameters = Map("sqlExpr" -> expr.sql))
                   }
                 }
               case e: Attribute if groupingExprs.isEmpty =>
@@ -350,20 +396,17 @@ trait CheckAnalysis extends PredicateHelper with LookupCatalog {
                   case a: AggregateExpression => a
                 }.nonEmpty)
                 failAnalysis(
-                  s"grouping expressions sequence is empty, " +
-                    s"and '${e.sql}' is not an aggregate function. " +
-                    s"Wrap '${aggExprs.map(_.sql).mkString("(", ", ", ")")}' 
in windowing " +
-                    s"function(s) or wrap '${e.sql}' in first() (or 
first_value) " +
-                    s"if you don't care which value you get."
-                )
+                  errorClass = "_LEGACY_ERROR_TEMP_2422",
+                  messageParameters = Map(
+                    "sqlExpr" -> e.sql,
+                    "aggExprs" -> aggExprs.map(_.sql).mkString("(", ", ", 
")")))
              case e: Attribute if !groupingExprs.exists(_.semanticEquals(e)) =>
                 throw QueryCompilationErrors.columnNotInGroupByClauseError(e)
               case s: ScalarSubquery
                  if s.children.nonEmpty && !groupingExprs.exists(_.semanticEquals(s)) =>
-                failAnalysis(s"Correlated scalar subquery '${s.sql}' is 
neither " +
-                  "present in the group by, nor in an aggregate function. Add 
it to group by " +
-                  "using ordinal position or wrap it in first() (or 
first_value) if you don't " +
-                  "care which value you get.")
+                failAnalysis(
+                  errorClass = "_LEGACY_ERROR_TEMP_2423",
+                  messageParameters = Map("sqlExpr" -> s.sql))
               case e if groupingExprs.exists(_.semanticEquals(e)) => // OK
               case e => e.children.foreach(checkValidAggregateExpression)
             }
@@ -371,23 +414,26 @@ trait CheckAnalysis extends PredicateHelper with LookupCatalog {
             def checkValidGroupingExprs(expr: Expression): Unit = {
               if (expr.exists(_.isInstanceOf[AggregateExpression])) {
                 failAnalysis(
-                  "aggregate functions are not allowed in GROUP BY, but found 
" + expr.sql)
+                  errorClass = "_LEGACY_ERROR_TEMP_2424",
+                  messageParameters = Map("sqlExpr" -> expr.sql))
               }
 
               // Check if the data type of expr is orderable.
               if (!RowOrdering.isOrderable(expr.dataType)) {
                 failAnalysis(
-                  s"expression ${expr.sql} cannot be used as a grouping 
expression " +
-                    s"because its data type ${expr.dataType.catalogString} is 
not an orderable " +
-                    s"data type.")
+                  errorClass = "_LEGACY_ERROR_TEMP_2425",
+                  messageParameters = Map(
+                    "sqlExpr" -> expr.sql,
+                    "dataType" -> expr.dataType.catalogString))
               }
 
               if (!expr.deterministic) {
                // This is just a sanity check, our analysis rule PullOutNondeterministic should
                // already pull out those nondeterministic expressions and evaluate them in
                 // a Project node.
-                failAnalysis(s"nondeterministic expression ${expr.sql} should 
not " +
-                  s"appear in grouping expression.")
+                failAnalysis(
+                  errorClass = "_LEGACY_ERROR_TEMP_2426",
+                  messageParameters = Map("sqlExpr" -> expr.sql))
               }
             }
 
@@ -456,7 +502,8 @@ trait CheckAnalysis extends PredicateHelper with LookupCatalog {
             orders.foreach { order =>
               if (!RowOrdering.isOrderable(order.dataType)) {
                 failAnalysis(
-                  s"sorting is not supported for columns of type 
${order.dataType.catalogString}")
+                  errorClass = "_LEGACY_ERROR_TEMP_2427",
+                  messageParameters = Map("type" -> 
order.dataType.catalogString))
               }
             }
 
@@ -470,11 +517,10 @@ trait CheckAnalysis extends PredicateHelper with LookupCatalog {
                 val offset = offsetExpr.eval().asInstanceOf[Int]
                 if (Int.MaxValue - limit < offset) {
                   failAnalysis(
-                    s"""
-                       |The sum of the LIMIT clause and the OFFSET clause must not be greater than
-                       |the maximum 32-bit integer value (2,147,483,647),
-                       |but found limit = $limit, offset = $offset.
-                       |""".stripMargin.replace("\n", " "))
+                    errorClass = "_LEGACY_ERROR_TEMP_2428",
+                    messageParameters = Map(
+                      "limit" -> limit.toString,
+                      "offset" -> offset.toString))
                 }
               case _ =>
             }
@@ -496,11 +542,12 @@ trait CheckAnalysis extends PredicateHelper with LookupCatalog {
               // Check the number of columns
               if (child.output.length != ref.length) {
                 failAnalysis(
-                  s"""
-                    |${operator.nodeName} can only be performed on tables with the same number
-                    |of columns, but the first table has ${ref.length} columns and
-                    |the ${ordinalNumber(ti + 1)} table has ${child.output.length} columns
-                  """.stripMargin.replace("\n", " ").trim())
+                  errorClass = "_LEGACY_ERROR_TEMP_2429",
+                  messageParameters = Map(
+                    "operator" -> operator.nodeName,
+                    "firstColNum" -> ref.length.toString,
+                    "nTab" -> ordinalNumber(ti + 1),
+                    "nColNum" -> child.output.length.toString))
               }
 
              val dataTypesAreCompatibleFn = getDataTypesAreCompatibleFn(operator)
@@ -515,7 +562,15 @@ trait CheckAnalysis extends PredicateHelper with LookupCatalog {
                       |${ordinalNumber(ti + 1)} table is ${dt1.catalogString} type which is not
                       |compatible with ${dt2.catalogString} at the same column of the first table
                     """.stripMargin.replace("\n", " ").trim()
-                  failAnalysis(errorMessage + extraHintForAnsiTypeCoercionPlan(operator))
+                  failAnalysis(
+                    errorClass = "_LEGACY_ERROR_TEMP_2430",
+                    messageParameters = Map(
+                      "operator" -> operator.nodeName,
+                      "ci" -> ordinalNumber(ci),
+                      "ti" -> ordinalNumber(ti + 1),
+                      "dt1" -> dt1.catalogString,
+                      "dt2" -> dt2.catalogString,
+                      "hint" -> extraHintForAnsiTypeCoercionPlan(operator)))
                 }
               }
             }
@@ -527,12 +582,15 @@ trait CheckAnalysis extends PredicateHelper with LookupCatalog {
                 case Some(_) =>
                   None
                 case _ =>
-                  Some(s"${column.quoted} is missing or is in a map or array")
+                  Some(column.quoted)
               }
             }
 
             if (badReferences.nonEmpty) {
-              failAnalysis(s"Invalid partitioning: ${badReferences.mkString(", 
")}")
+              failAnalysis(
+                errorClass = "_LEGACY_ERROR_TEMP_2431",
+                messageParameters = Map(
+                  "cols" -> badReferences.mkString(", ")))
             }
 
            create.tableSchema.foreach(f => TypeUtils.failWithIntervalType(f.dataType))
@@ -566,12 +624,15 @@ trait CheckAnalysis extends PredicateHelper with LookupCatalog {
               msgForMissingAttributes
             }
 
-            failAnalysis(msg)
+            failAnalysis(
+              errorClass = "_LEGACY_ERROR_TEMP_2432",
+              messageParameters = Map("msg" -> msg))
 
           case p @ Project(exprs, _) if containsMultipleGenerators(exprs) =>
             failAnalysis(
-              s"""Only a single table generating function is allowed in a 
SELECT clause, found:
-                 | ${exprs.map(_.sql).mkString(",")}""".stripMargin)
+              errorClass = "_LEGACY_ERROR_TEMP_2433",
+              messageParameters = Map(
+                "sqlExprs" -> exprs.map(_.sql).mkString(",")))
 
           case p @ Project(projectList, _) =>
             projectList.foreach(_.transformDownWithPruning(
@@ -583,46 +644,44 @@ trait CheckAnalysis extends PredicateHelper with LookupCatalog {
           case j: Join if !j.duplicateResolved =>
            val conflictingAttributes = j.left.outputSet.intersect(j.right.outputSet)
             failAnalysis(
-              s"""
-                 |Failure when resolving conflicting references in Join:
-                 |$plan
-                 |Conflicting attributes: ${conflictingAttributes.mkString(",")}
-                 |""".stripMargin)
+              errorClass = "_LEGACY_ERROR_TEMP_2434",
+              messageParameters = Map(
+                "plan" -> plan.toString,
+                "conflictingAttributes" -> 
conflictingAttributes.mkString(",")))
 
           case i: Intersect if !i.duplicateResolved =>
            val conflictingAttributes = i.left.outputSet.intersect(i.right.outputSet)
             failAnalysis(
-              s"""
-                 |Failure when resolving conflicting references in Intersect:
-                 |$plan
-                 |Conflicting attributes: ${conflictingAttributes.mkString(",")}
-               """.stripMargin)
+              errorClass = "_LEGACY_ERROR_TEMP_2435",
+              messageParameters = Map(
+                "plan" -> plan.toString,
+                "conflictingAttributes" -> 
conflictingAttributes.mkString(",")))
 
           case e: Except if !e.duplicateResolved =>
            val conflictingAttributes = e.left.outputSet.intersect(e.right.outputSet)
             failAnalysis(
-              s"""
-                 |Failure when resolving conflicting references in Except:
-                 |$plan
-                 |Conflicting attributes: ${conflictingAttributes.mkString(",")}
-               """.stripMargin)
+              errorClass = "_LEGACY_ERROR_TEMP_2436",
+              messageParameters = Map(
+                "plan" -> plan.toString,
+                "conflictingAttributes" -> 
conflictingAttributes.mkString(",")))
 
           case j: AsOfJoin if !j.duplicateResolved =>
            val conflictingAttributes = j.left.outputSet.intersect(j.right.outputSet)
             failAnalysis(
-              s"""
-                 |Failure when resolving conflicting references in AsOfJoin:
-                 |$plan
-                 |Conflicting attributes: ${conflictingAttributes.mkString(",")}
-                 |""".stripMargin)
+              errorClass = "_LEGACY_ERROR_TEMP_2437",
+              messageParameters = Map(
+                "plan" -> plan.toString,
+                "conflictingAttributes" -> 
conflictingAttributes.mkString(",")))
 
          // TODO: although map type is not orderable, technically map type should be able to be
          // used in equality comparison, remove this type check once we support it.
           case o if mapColumnInSetOperation(o).isDefined =>
             val mapCol = mapColumnInSetOperation(o).get
-            failAnalysis("Cannot have map type columns in DataFrame which 
calls " +
-              s"set operations(intersect, except, etc.), but the type of 
column ${mapCol.name} " +
-              "is " + mapCol.dataType.catalogString)
+            failAnalysis(
+              errorClass = "_LEGACY_ERROR_TEMP_2438",
+              messageParameters = Map(
+                "colName" -> mapCol.name,
+                "dataType" -> mapCol.dataType.catalogString))
 
           case o if o.expressions.exists(!_.deterministic) &&
             !o.isInstanceOf[Project] && !o.isInstanceOf[Filter] &&
@@ -631,11 +690,10 @@ trait CheckAnalysis extends PredicateHelper with LookupCatalog {
             !o.isInstanceOf[LateralJoin] =>
             // The rule above is used to check Aggregate operator.
             failAnalysis(
-              s"""nondeterministic expressions are only allowed in
-                 |Project, Filter, Aggregate or Window, found:
-                 | ${o.expressions.map(_.sql).mkString(",")}
-                 |in operator ${operator.simpleString(SQLConf.get.maxToStringFields)}
-               """.stripMargin)
+              errorClass = "_LEGACY_ERROR_TEMP_2439",
+              messageParameters = Map(
+                "sqlExprs" -> o.expressions.map(_.sql).mkString(","),
+                "operator" -> 
operator.simpleString(SQLConf.get.maxToStringFields)))
 
           case _: UnresolvedHint => throw new IllegalStateException(
             "Logical hint operator should be removed during analysis.")
@@ -644,20 +702,19 @@ trait CheckAnalysis extends PredicateHelper with LookupCatalog {
            if PlanHelper.specialExpressionsInUnsupportedOperator(f).nonEmpty =>
            val invalidExprSqls = PlanHelper.specialExpressionsInUnsupportedOperator(f).map(_.sql)
             failAnalysis(
-              s"""
-                 |Aggregate/Window/Generate expressions are not valid in where clause of the query.
-                 |Expression in where clause: [${condition.sql}]
-                 |Invalid expressions: [${invalidExprSqls.mkString(", ")}]""".stripMargin)
+              errorClass = "_LEGACY_ERROR_TEMP_2440",
+              messageParameters = Map(
+                "condition" -> condition.sql,
+                "invalidExprSqls" -> invalidExprSqls.mkString(", ")))
 
          case other if PlanHelper.specialExpressionsInUnsupportedOperator(other).nonEmpty =>
            val invalidExprSqls =
              PlanHelper.specialExpressionsInUnsupportedOperator(other).map(_.sql)
             failAnalysis(
-              s"""
-                 |The query operator `${other.nodeName}` contains one or more unsupported
-                 |expression types Aggregate, Window or Generate.
-                 |Invalid expressions: [${invalidExprSqls.mkString(", ")}]""".stripMargin
-            )
+              errorClass = "_LEGACY_ERROR_TEMP_2441",
+              messageParameters = Map(
+                "operator" -> other.nodeName,
+                "invalidExprSqls" -> invalidExprSqls.mkString(", ")))
 
           case _ => // Analysis successful!
         }
@@ -666,7 +723,9 @@ trait CheckAnalysis extends PredicateHelper with LookupCatalog {
     extendedCheckRules.foreach(_(plan))
     plan.foreachUp {
       case o if !o.resolved =>
-        failAnalysis(s"unresolved operator 
${o.simpleString(SQLConf.get.maxToStringFields)}")
+        failAnalysis(
+          errorClass = "_LEGACY_ERROR_TEMP_2442",
+          messageParameters = Map("operator" -> 
o.simpleString(SQLConf.get.maxToStringFields)))
       case _ =>
     }
 
@@ -941,7 +1000,10 @@ trait CheckAnalysis extends PredicateHelper with LookupCatalog {
               // of a CTE that is used multiple times or a self join.
               if (!metrics.sameResult(other)) {
                 failAnalysis(
-                  s"Multiple definitions of observed metrics named '$name': 
$plan")
+                  errorClass = "_LEGACY_ERROR_TEMP_2443",
+                  messageParameters = Map(
+                    "name" -> name,
+                    "plan" -> plan.toString))
               }
             case None =>
               metricsMap.put(name, metrics)
diff --git a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/analysis/AnalysisErrorSuite.scala b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/analysis/AnalysisErrorSuite.scala
index 492db3785ae..9a170100f0b 100644
--- a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/analysis/AnalysisErrorSuite.scala
+++ b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/analysis/AnalysisErrorSuite.scala
@@ -644,13 +644,11 @@ class AnalysisErrorSuite extends AnalysisTest {
     "The offset expression must be equal to or greater than 0, but got -1" :: 
Nil
   )
 
-  errorTest(
+  errorClassTest(
     "the sum of num_rows in limit clause and num_rows in offset clause less 
than Int.MaxValue",
     testRelation.offset(Literal(2000000000, 
IntegerType)).limit(Literal(1000000000, IntegerType)),
-    "The sum of the LIMIT clause and the OFFSET clause must not be greater 
than" +
-      " the maximum 32-bit integer value (2,147,483,647)," +
-      " but found limit = 1000000000, offset = 2000000000." :: Nil
-  )
+    "_LEGACY_ERROR_TEMP_2428",
+    Map("limit" -> "1000000000", "offset" -> "2000000000"))
 
   errorTest(
     "more than one generators in SELECT",
diff --git a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/analysis/CreateTablePartitioningValidationSuite.scala b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/analysis/CreateTablePartitioningValidationSuite.scala
index 941d0209ea6..67441e18b0f 100644
--- a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/analysis/CreateTablePartitioningValidationSuite.scala
+++ b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/analysis/CreateTablePartitioningValidationSuite.scala
@@ -91,10 +91,9 @@ class CreateTablePartitioningValidationSuite extends AnalysisTest {
       ignoreIfExists = false)
 
     assert(!plan.resolved)
-    assertAnalysisError(plan, Seq(
-      "Invalid partitioning",
-      "point.z is missing or is in a map or array",
-      "does_not_exist is missing or is in a map or array"))
+    assertAnalysisErrorClass(plan,
+      expectedErrorClass = "_LEGACY_ERROR_TEMP_2431",
+      expectedMessageParameters = Map("cols" -> "does_not_exist, point.z"))
   }
 
   test("CreateTableAsSelect: success with top-level column") {
diff --git a/sql/core/src/test/resources/sql-tests/results/except-all.sql.out b/sql/core/src/test/resources/sql-tests/results/except-all.sql.out
index a6902d06cc2..ea3968fdbdf 100644
--- a/sql/core/src/test/resources/sql-tests/results/except-all.sql.out
+++ b/sql/core/src/test/resources/sql-tests/results/except-all.sql.out
@@ -138,7 +138,17 @@ SELECT array(1)
 struct<>
 -- !query output
 org.apache.spark.sql.AnalysisException
-ExceptAll can only be performed on tables with compatible column types. The first column of the second table is array<int> type which is not compatible with int at the same column of the first table
+{
+  "errorClass" : "_LEGACY_ERROR_TEMP_2430",
+  "messageParameters" : {
+    "ci" : "first",
+    "dt1" : "array<int>",
+    "dt2" : "int",
+    "hint" : "",
+    "operator" : "ExceptAll",
+    "ti" : "second"
+  }
+}
 
 
 -- !query
@@ -210,7 +220,15 @@ SELECT k, v FROM tab4
 struct<>
 -- !query output
 org.apache.spark.sql.AnalysisException
-ExceptAll can only be performed on tables with the same number of columns, but the first table has 1 columns and the second table has 2 columns
+{
+  "errorClass" : "_LEGACY_ERROR_TEMP_2429",
+  "messageParameters" : {
+    "firstColNum" : "1",
+    "nColNum" : "2",
+    "nTab" : "second",
+    "operator" : "ExceptAll"
+  }
+}
 
 
 -- !query
diff --git a/sql/core/src/test/resources/sql-tests/results/group-analytics.sql.out b/sql/core/src/test/resources/sql-tests/results/group-analytics.sql.out
index 8ca6b43bc0d..4ca429ce7bc 100644
--- a/sql/core/src/test/resources/sql-tests/results/group-analytics.sql.out
+++ b/sql/core/src/test/resources/sql-tests/results/group-analytics.sql.out
@@ -465,7 +465,9 @@ SELECT course, year, GROUPING(course) FROM courseSales GROUP BY course, year
 struct<>
 -- !query output
 org.apache.spark.sql.AnalysisException
-grouping() can only be used with GroupingSets/Cube/Rollup
+{
+  "errorClass" : "_LEGACY_ERROR_TEMP_2445"
+}
 
 
 -- !query
@@ -474,7 +476,9 @@ SELECT course, year, GROUPING_ID(course, year) FROM courseSales GROUP BY course,
 struct<>
 -- !query output
 org.apache.spark.sql.AnalysisException
-grouping_id() can only be used with GroupingSets/Cube/Rollup
+{
+  "errorClass" : "_LEGACY_ERROR_TEMP_2407"
+}
 
 
 -- !query
diff --git a/sql/core/src/test/resources/sql-tests/results/group-by-filter.sql.out b/sql/core/src/test/resources/sql-tests/results/group-by-filter.sql.out
index b458ea6a2bf..7d2191dfa8a 100644
--- a/sql/core/src/test/resources/sql-tests/results/group-by-filter.sql.out
+++ b/sql/core/src/test/resources/sql-tests/results/group-by-filter.sql.out
@@ -48,7 +48,13 @@ SELECT a, COUNT(b) FILTER (WHERE a >= 2) FROM testData
 struct<>
 -- !query output
 org.apache.spark.sql.AnalysisException
-grouping expressions sequence is empty, and 'testdata.a' is not an aggregate function. Wrap '(count(testdata.b) FILTER (WHERE (testdata.a >= 2)) AS `count(b) FILTER (WHERE (a >= 2))`)' in windowing function(s) or wrap 'testdata.a' in first() (or first_value) if you don't care which value you get.
+{
+  "errorClass" : "_LEGACY_ERROR_TEMP_2422",
+  "messageParameters" : {
+    "aggExprs" : "(count(testdata.b) FILTER (WHERE (testdata.a >= 2)) AS 
`count(b) FILTER (WHERE (a >= 2))`)",
+    "sqlExpr" : "testdata.a"
+  }
+}
 
 
 -- !query
diff --git a/sql/core/src/test/resources/sql-tests/results/group-by.sql.out b/sql/core/src/test/resources/sql-tests/results/group-by.sql.out
index 324b2bb2d8d..578fa1588b3 100644
--- a/sql/core/src/test/resources/sql-tests/results/group-by.sql.out
+++ b/sql/core/src/test/resources/sql-tests/results/group-by.sql.out
@@ -15,7 +15,13 @@ SELECT a, COUNT(b) FROM testData
 struct<>
 -- !query output
 org.apache.spark.sql.AnalysisException
-grouping expressions sequence is empty, and 'testdata.a' is not an aggregate function. Wrap '(count(testdata.b) AS `count(b)`)' in windowing function(s) or wrap 'testdata.a' in first() (or first_value) if you don't care which value you get.
+{
+  "errorClass" : "_LEGACY_ERROR_TEMP_2422",
+  "messageParameters" : {
+    "aggExprs" : "(count(testdata.b) AS `count(b)`)",
+    "sqlExpr" : "testdata.a"
+  }
+}
 
 
 -- !query
@@ -199,7 +205,12 @@ SELECT COUNT(b) AS k FROM testData GROUP BY k
 struct<>
 -- !query output
 org.apache.spark.sql.AnalysisException
-aggregate functions are not allowed in GROUP BY, but found count(testdata.b)
+{
+  "errorClass" : "_LEGACY_ERROR_TEMP_2424",
+  "messageParameters" : {
+    "sqlExpr" : "count(testdata.b)"
+  }
+}
 
 
 -- !query
@@ -326,7 +337,13 @@ SELECT id FROM range(10) HAVING id > 0
 struct<>
 -- !query output
 org.apache.spark.sql.AnalysisException
-grouping expressions sequence is empty, and 'id' is not an aggregate function. Wrap '()' in windowing function(s) or wrap 'id' in first() (or first_value) if you don't care which value you get.
+{
+  "errorClass" : "_LEGACY_ERROR_TEMP_2422",
+  "messageParameters" : {
+    "aggExprs" : "()",
+    "sqlExpr" : "id"
+  }
+}
 
 
 -- !query
@@ -360,10 +377,13 @@ SELECT 1 FROM range(10) HAVING MAX(id) > 0
 struct<>
 -- !query output
 org.apache.spark.sql.AnalysisException
-
-Aggregate/Window/Generate expressions are not valid in where clause of the query.
-Expression in where clause: [(max(id) > CAST(0 AS BIGINT))]
-Invalid expressions: [max(id)]
+{
+  "errorClass" : "_LEGACY_ERROR_TEMP_2440",
+  "messageParameters" : {
+    "condition" : "(max(id) > CAST(0 AS BIGINT))",
+    "invalidExprSqls" : "max(id)"
+  }
+}
 
 
 -- !query
@@ -754,10 +774,13 @@ SELECT count(*) FROM test_agg WHERE count(*) > 1L
 struct<>
 -- !query output
 org.apache.spark.sql.AnalysisException
-
-Aggregate/Window/Generate expressions are not valid in where clause of the query.
-Expression in where clause: [(count(1) > 1L)]
-Invalid expressions: [count(1)]
+{
+  "errorClass" : "_LEGACY_ERROR_TEMP_2440",
+  "messageParameters" : {
+    "condition" : "(count(1) > 1L)",
+    "invalidExprSqls" : "count(1)"
+  }
+}
 
 
 -- !query
@@ -766,10 +789,13 @@ SELECT count(*) FROM test_agg WHERE count(*) + 1L > 1L
 struct<>
 -- !query output
 org.apache.spark.sql.AnalysisException
-
-Aggregate/Window/Generate expressions are not valid in where clause of the query.
-Expression in where clause: [((count(1) + 1L) > 1L)]
-Invalid expressions: [count(1)]
+{
+  "errorClass" : "_LEGACY_ERROR_TEMP_2440",
+  "messageParameters" : {
+    "condition" : "((count(1) + 1L) > 1L)",
+    "invalidExprSqls" : "count(1)"
+  }
+}
 
 
 -- !query
@@ -778,10 +804,13 @@ SELECT count(*) FROM test_agg WHERE k = 1 or k = 2 or count(*) + 1L > 1L or max(
 struct<>
 -- !query output
 org.apache.spark.sql.AnalysisException
-
-Aggregate/Window/Generate expressions are not valid in where clause of the query.
-Expression in where clause: [(((test_agg.k = 1) OR (test_agg.k = 2)) OR (((count(1) + 1L) > 1L) OR (max(test_agg.k) > 1)))]
-Invalid expressions: [count(1), max(test_agg.k)]
+{
+  "errorClass" : "_LEGACY_ERROR_TEMP_2440",
+  "messageParameters" : {
+    "condition" : "(((test_agg.k = 1) OR (test_agg.k = 2)) OR (((count(1) + 
1L) > 1L) OR (max(test_agg.k) > 1)))",
+    "invalidExprSqls" : "count(1), max(test_agg.k)"
+  }
+}
 
 
 -- !query
diff --git a/sql/core/src/test/resources/sql-tests/results/intersect-all.sql.out b/sql/core/src/test/resources/sql-tests/results/intersect-all.sql.out
index b439f79562a..263476c911f 100644
--- a/sql/core/src/test/resources/sql-tests/results/intersect-all.sql.out
+++ b/sql/core/src/test/resources/sql-tests/results/intersect-all.sql.out
@@ -95,7 +95,17 @@ SELECT array(1), 2
 struct<>
 -- !query output
 org.apache.spark.sql.AnalysisException
-IntersectAll can only be performed on tables with compatible column types. The first column of the second table is array<int> type which is not compatible with int at the same column of the first table
+{
+  "errorClass" : "_LEGACY_ERROR_TEMP_2430",
+  "messageParameters" : {
+    "ci" : "first",
+    "dt1" : "array<int>",
+    "dt2" : "int",
+    "hint" : "",
+    "operator" : "IntersectAll",
+    "ti" : "second"
+  }
+}
 
 
 -- !query
@@ -106,7 +116,15 @@ SELECT k, v FROM tab2
 struct<>
 -- !query output
 org.apache.spark.sql.AnalysisException
-IntersectAll can only be performed on tables with the same number of columns, but the first table has 1 columns and the second table has 2 columns
+{
+  "errorClass" : "_LEGACY_ERROR_TEMP_2429",
+  "messageParameters" : {
+    "firstColNum" : "1",
+    "nColNum" : "2",
+    "nTab" : "second",
+    "operator" : "IntersectAll"
+  }
+}
 
 
 -- !query
diff --git a/sql/core/src/test/resources/sql-tests/results/limit.sql.out b/sql/core/src/test/resources/sql-tests/results/limit.sql.out
index 7d1c1e2b34d..c5320bf4869 100644
--- a/sql/core/src/test/resources/sql-tests/results/limit.sql.out
+++ b/sql/core/src/test/resources/sql-tests/results/limit.sql.out
@@ -50,7 +50,13 @@ SELECT * FROM testdata LIMIT -1
 struct<>
 -- !query output
 org.apache.spark.sql.AnalysisException
-The limit expression must be equal to or greater than 0, but got -1
+{
+  "errorClass" : "_LEGACY_ERROR_TEMP_2403",
+  "messageParameters" : {
+    "name" : "limit",
+    "v" : "-1"
+  }
+}
 
 
 -- !query
@@ -59,7 +65,13 @@ SELECT * FROM testData TABLESAMPLE (-1 ROWS)
 struct<>
 -- !query output
 org.apache.spark.sql.AnalysisException
-The limit expression must be equal to or greater than 0, but got -1
+{
+  "errorClass" : "_LEGACY_ERROR_TEMP_2403",
+  "messageParameters" : {
+    "name" : "limit",
+    "v" : "-1"
+  }
+}
 
 
 -- !query
@@ -76,7 +88,13 @@ SELECT * FROM testdata LIMIT CAST(NULL AS INT)
 struct<>
 -- !query output
 org.apache.spark.sql.AnalysisException
-The evaluated limit expression must not be null, but got CAST(NULL AS INT)
+{
+  "errorClass" : "_LEGACY_ERROR_TEMP_2402",
+  "messageParameters" : {
+    "limitExpr" : "CAST(NULL AS INT)",
+    "name" : "limit"
+  }
+}
 
 
 -- !query
@@ -85,7 +103,13 @@ SELECT * FROM testdata LIMIT key > 3
 struct<>
 -- !query output
 org.apache.spark.sql.AnalysisException
-The limit expression must evaluate to a constant value, but got (spark_catalog.default.testdata.key > 3)
+{
+  "errorClass" : "_LEGACY_ERROR_TEMP_2400",
+  "messageParameters" : {
+    "limitExpr" : "(spark_catalog.default.testdata.key > 3)",
+    "name" : "limit"
+  }
+}
 
 
 -- !query
@@ -94,7 +118,13 @@ SELECT * FROM testdata LIMIT true
 struct<>
 -- !query output
 org.apache.spark.sql.AnalysisException
-The limit expression must be integer type, but got boolean
+{
+  "errorClass" : "_LEGACY_ERROR_TEMP_2401",
+  "messageParameters" : {
+    "dataType" : "boolean",
+    "name" : "limit"
+  }
+}
 
 
 -- !query
@@ -103,7 +133,13 @@ SELECT * FROM testdata LIMIT 'a'
 struct<>
 -- !query output
 org.apache.spark.sql.AnalysisException
-The limit expression must be integer type, but got string
+{
+  "errorClass" : "_LEGACY_ERROR_TEMP_2401",
+  "messageParameters" : {
+    "dataType" : "string",
+    "name" : "limit"
+  }
+}
 
 
 -- !query
diff --git a/sql/core/src/test/resources/sql-tests/results/percentiles.sql.out b/sql/core/src/test/resources/sql-tests/results/percentiles.sql.out
index c1e11681728..4c5af9caf4a 100644
--- a/sql/core/src/test/resources/sql-tests/results/percentiles.sql.out
+++ b/sql/core/src/test/resources/sql-tests/results/percentiles.sql.out
@@ -178,7 +178,12 @@ ORDER BY salary
 struct<>
 -- !query output
 org.apache.spark.sql.AnalysisException
-Cannot specify order by or frame for 'percentile_cont'.
+{
+  "errorClass" : "_LEGACY_ERROR_TEMP_2411",
+  "messageParameters" : {
+    "aggFunc" : "percentile_cont"
+  }
+}
 
 
 -- !query
@@ -194,7 +199,12 @@ ORDER BY salary
 struct<>
 -- !query output
 org.apache.spark.sql.AnalysisException
-Cannot specify order by or frame for 'percentile_disc'.
+{
+  "errorClass" : "_LEGACY_ERROR_TEMP_2411",
+  "messageParameters" : {
+    "aggFunc" : "percentile_disc"
+  }
+}
 
 
 -- !query
@@ -209,7 +219,12 @@ ORDER BY salary
 struct<>
 -- !query output
 org.apache.spark.sql.AnalysisException
-Cannot specify order by or frame for 'median'.
+{
+  "errorClass" : "_LEGACY_ERROR_TEMP_2411",
+  "messageParameters" : {
+    "aggFunc" : "median"
+  }
+}
 
 
 -- !query
@@ -225,7 +240,12 @@ ORDER BY salary
 struct<>
 -- !query output
 org.apache.spark.sql.AnalysisException
-Cannot specify order by or frame for 'percentile_cont'.
+{
+  "errorClass" : "_LEGACY_ERROR_TEMP_2411",
+  "messageParameters" : {
+    "aggFunc" : "percentile_cont"
+  }
+}
 
 
 -- !query
@@ -241,7 +261,12 @@ ORDER BY salary
 struct<>
 -- !query output
 org.apache.spark.sql.AnalysisException
-Cannot specify order by or frame for 'percentile_disc'.
+{
+  "errorClass" : "_LEGACY_ERROR_TEMP_2411",
+  "messageParameters" : {
+    "aggFunc" : "percentile_disc"
+  }
+}
 
 
 -- !query
@@ -256,7 +281,12 @@ ORDER BY salary
 struct<>
 -- !query output
 org.apache.spark.sql.AnalysisException
-Cannot specify order by or frame for 'median'.
+{
+  "errorClass" : "_LEGACY_ERROR_TEMP_2411",
+  "messageParameters" : {
+    "aggFunc" : "median"
+  }
+}
 
 
 -- !query
@@ -336,7 +366,12 @@ ORDER BY salary
 struct<>
 -- !query output
 org.apache.spark.sql.AnalysisException
-Cannot specify order by or frame for 'percentile_cont'.
+{
+  "errorClass" : "_LEGACY_ERROR_TEMP_2411",
+  "messageParameters" : {
+    "aggFunc" : "percentile_cont"
+  }
+}
 
 
 -- !query
@@ -353,7 +388,12 @@ ORDER BY salary
 struct<>
 -- !query output
 org.apache.spark.sql.AnalysisException
-Cannot specify order by or frame for 'percentile_disc'.
+{
+  "errorClass" : "_LEGACY_ERROR_TEMP_2411",
+  "messageParameters" : {
+    "aggFunc" : "percentile_disc"
+  }
+}
 
 
 -- !query
@@ -369,7 +409,12 @@ ORDER BY salary
 struct<>
 -- !query output
 org.apache.spark.sql.AnalysisException
-Cannot specify order by or frame for 'median'.
+{
+  "errorClass" : "_LEGACY_ERROR_TEMP_2411",
+  "messageParameters" : {
+    "aggFunc" : "median"
+  }
+}
 
 
 -- !query
@@ -386,7 +431,12 @@ ORDER BY salary
 struct<>
 -- !query output
 org.apache.spark.sql.AnalysisException
-Cannot specify order by or frame for 'percentile_cont'.
+{
+  "errorClass" : "_LEGACY_ERROR_TEMP_2411",
+  "messageParameters" : {
+    "aggFunc" : "percentile_cont"
+  }
+}
 
 
 -- !query
@@ -403,7 +453,12 @@ ORDER BY salary
 struct<>
 -- !query output
 org.apache.spark.sql.AnalysisException
-Cannot specify order by or frame for 'percentile_disc'.
+{
+  "errorClass" : "_LEGACY_ERROR_TEMP_2411",
+  "messageParameters" : {
+    "aggFunc" : "percentile_disc"
+  }
+}
 
 
 -- !query
@@ -419,7 +474,12 @@ ORDER BY salary
 struct<>
 -- !query output
 org.apache.spark.sql.AnalysisException
-Cannot specify order by or frame for 'median'.
+{
+  "errorClass" : "_LEGACY_ERROR_TEMP_2411",
+  "messageParameters" : {
+    "aggFunc" : "median"
+  }
+}
 
 
 -- !query
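
Every `percentiles.sql.out` hunk above is the same substitution: the old one-line message becomes `_LEGACY_ERROR_TEMP_2411` with the function name in `aggFunc`. A minimal spark-shell sketch of the failure (the view and column names are illustrative, not taken from the test file; `getErrorClass` comes from the `SparkThrowable` interface, which `AnalysisException` implements):

```
import org.apache.spark.sql.AnalysisException

spark.sql("CREATE OR REPLACE TEMP VIEW emp AS " +
  "SELECT * FROM VALUES (1, 1000.0D), (1, 2000.0D) AS t(dept, salary)")
try {
  // An ORDER BY (or frame) in the OVER clause of percentile_cont,
  // percentile_disc or median is what raises the error migrated above.
  spark.sql(
    """SELECT percentile_cont(0.5) WITHIN GROUP (ORDER BY salary)
      |         OVER (PARTITION BY dept ORDER BY salary)
      |FROM emp""".stripMargin).collect()
} catch {
  case e: AnalysisException =>
    println(e.getErrorClass) // expected: _LEGACY_ERROR_TEMP_2411
}
```
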
diff --git a/sql/core/src/test/resources/sql-tests/results/pivot.sql.out b/sql/core/src/test/resources/sql-tests/results/pivot.sql.out
index 564a305efbf..bd172f9843c 100644
--- a/sql/core/src/test/resources/sql-tests/results/pivot.sql.out
+++ b/sql/core/src/test/resources/sql-tests/results/pivot.sql.out
@@ -283,7 +283,9 @@ PIVOT (
 struct<>
 -- !query output
 org.apache.spark.sql.AnalysisException
-It is not allowed to use an aggregate function in the argument of another aggregate function. Please use the inner aggregate function in a sub-query.
+{
+  "errorClass" : "_LEGACY_ERROR_TEMP_2420"
+}
 
 
 -- !query
diff --git a/sql/core/src/test/resources/sql-tests/results/postgreSQL/aggregates_part3.sql.out b/sql/core/src/test/resources/sql-tests/results/postgreSQL/aggregates_part3.sql.out
index 450dd5ca743..027cef20596 100644
--- a/sql/core/src/test/resources/sql-tests/results/postgreSQL/aggregates_part3.sql.out
+++ b/sql/core/src/test/resources/sql-tests/results/postgreSQL/aggregates_part3.sql.out
@@ -5,7 +5,9 @@ select max(min(unique1)) from tenk1
 struct<>
 -- !query output
 org.apache.spark.sql.AnalysisException
-It is not allowed to use an aggregate function in the argument of another aggregate function. Please use the inner aggregate function in a sub-query.
+{
+  "errorClass" : "_LEGACY_ERROR_TEMP_2420"
+}
 
 
 -- !query
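
The `pivot.sql.out` and `aggregates_part3.sql.out` hunks above both land on `_LEGACY_ERROR_TEMP_2420`, which takes no parameters. A spark-shell sketch of the nested-aggregate mistake behind it:

```
import org.apache.spark.sql.AnalysisException

try {
  // An aggregate used as the argument of another aggregate fails analysis.
  spark.sql("SELECT max(min(id)) FROM range(10)").collect()
} catch {
  case e: AnalysisException =>
    println(e.getErrorClass) // expected: _LEGACY_ERROR_TEMP_2420
}
```
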
diff --git a/sql/core/src/test/resources/sql-tests/results/postgreSQL/limit.sql.out b/sql/core/src/test/resources/sql-tests/results/postgreSQL/limit.sql.out
index ece34bf3f1c..754ef794e83 100644
--- a/sql/core/src/test/resources/sql-tests/results/postgreSQL/limit.sql.out
+++ b/sql/core/src/test/resources/sql-tests/results/postgreSQL/limit.sql.out
@@ -131,7 +131,13 @@ select * from int8_tbl limit (case when random() < 0.5 then bigint(null) end)
 struct<>
 -- !query output
 org.apache.spark.sql.AnalysisException
-The limit expression must evaluate to a constant value, but got CASE WHEN (_nondeterministic < CAST(0.5BD AS DOUBLE)) THEN CAST(NULL AS BIGINT) END
+{
+  "errorClass" : "_LEGACY_ERROR_TEMP_2400",
+  "messageParameters" : {
+    "limitExpr" : "CASE WHEN (_nondeterministic < CAST(0.5BD AS DOUBLE)) THEN 
CAST(NULL AS BIGINT) END",
+    "name" : "limit"
+  }
+}
 
 
 -- !query
@@ -140,7 +146,13 @@ select * from int8_tbl offset (case when random() < 0.5 then bigint(null) end)
 struct<>
 -- !query output
 org.apache.spark.sql.AnalysisException
-The offset expression must evaluate to a constant value, but got CASE WHEN (_nondeterministic < CAST(0.5BD AS DOUBLE)) THEN CAST(NULL AS BIGINT) END
+{
+  "errorClass" : "_LEGACY_ERROR_TEMP_2400",
+  "messageParameters" : {
+    "limitExpr" : "CASE WHEN (_nondeterministic < CAST(0.5BD AS DOUBLE)) THEN 
CAST(NULL AS BIGINT) END",
+    "name" : "offset"
+  }
+}
 
 
 -- !query
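
Both `limit.sql.out` hunks above share `_LEGACY_ERROR_TEMP_2400`; only the `name` parameter (`limit` vs. `offset`) differs. A sketch of the non-constant case, using `rand()` in place of the test's `random()`:

```
import org.apache.spark.sql.AnalysisException

try {
  // The LIMIT expression is nondeterministic, hence not a constant.
  spark.sql("SELECT * FROM range(10) LIMIT " +
    "(CASE WHEN rand() < 0.5 THEN CAST(NULL AS BIGINT) END)").collect()
} catch {
  case e: AnalysisException =>
    println(e.getErrorClass) // expected: _LEGACY_ERROR_TEMP_2400
}
```
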
diff --git a/sql/core/src/test/resources/sql-tests/results/postgreSQL/select_having.sql.out b/sql/core/src/test/resources/sql-tests/results/postgreSQL/select_having.sql.out
index d005c120815..909e3feb25a 100644
--- a/sql/core/src/test/resources/sql-tests/results/postgreSQL/select_having.sql.out
+++ b/sql/core/src/test/resources/sql-tests/results/postgreSQL/select_having.sql.out
@@ -140,7 +140,13 @@ SELECT a FROM test_having HAVING min(a) < max(a)
 struct<>
 -- !query output
 org.apache.spark.sql.AnalysisException
-grouping expressions sequence is empty, and 'spark_catalog.default.test_having.a' is not an aggregate function. Wrap '(min(spark_catalog.default.test_having.a) AS `min(a#x)`, max(spark_catalog.default.test_having.a) AS `max(a#x)`)' in windowing function(s) or wrap 'spark_catalog.default.test_having.a' in first() (or first_value) if you don't care which value you get.
+{
+  "errorClass" : "_LEGACY_ERROR_TEMP_2422",
+  "messageParameters" : {
+    "aggExprs" : "(min(spark_catalog.default.test_having.a) AS `min(a#x)`, 
max(spark_catalog.default.test_having.a) AS `max(a#x)`)",
+    "sqlExpr" : "spark_catalog.default.test_having.a"
+  }
+}
 
 
 -- !query
diff --git a/sql/core/src/test/resources/sql-tests/results/postgreSQL/window_part3.sql.out b/sql/core/src/test/resources/sql-tests/results/postgreSQL/window_part3.sql.out
index ab8989e073a..5138e6adab2 100644
--- a/sql/core/src/test/resources/sql-tests/results/postgreSQL/window_part3.sql.out
+++ b/sql/core/src/test/resources/sql-tests/results/postgreSQL/window_part3.sql.out
@@ -331,10 +331,13 @@ SELECT * FROM empsalary INNER JOIN tenk1 ON row_number() OVER (ORDER BY salary)
 struct<>
 -- !query output
 org.apache.spark.sql.AnalysisException
-
-The query operator `Join` contains one or more unsupported
-expression types Aggregate, Window or Generate.
-Invalid expressions: [row_number() OVER (ORDER BY spark_catalog.default.empsalary.salary ASC NULLS FIRST ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW)]
+{
+  "errorClass" : "_LEGACY_ERROR_TEMP_2441",
+  "messageParameters" : {
+    "invalidExprSqls" : "row_number() OVER (ORDER BY 
spark_catalog.default.empsalary.salary ASC NULLS FIRST ROWS BETWEEN UNBOUNDED 
PRECEDING AND CURRENT ROW)",
+    "operator" : "Join"
+  }
+}
 
 
 -- !query
@@ -343,10 +346,13 @@ SELECT rank() OVER (ORDER BY 1), count(*) FROM empsalary GROUP BY 1
 struct<>
 -- !query output
 org.apache.spark.sql.AnalysisException
-
-The query operator `Aggregate` contains one or more unsupported
-expression types Aggregate, Window or Generate.
-Invalid expressions: [RANK() OVER (ORDER BY 1 ASC NULLS FIRST ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW)]
+{
+  "errorClass" : "_LEGACY_ERROR_TEMP_2441",
+  "messageParameters" : {
+    "invalidExprSqls" : "RANK() OVER (ORDER BY 1 ASC NULLS FIRST ROWS BETWEEN 
UNBOUNDED PRECEDING AND CURRENT ROW)",
+    "operator" : "Aggregate"
+  }
+}
 
 
 -- !query
diff --git a/sql/core/src/test/resources/sql-tests/results/subquery/negative-cases/invalid-correlation.sql.out b/sql/core/src/test/resources/sql-tests/results/subquery/negative-cases/invalid-correlation.sql.out
index 4d0d5c2a190..4d2c9b99d8a 100644
--- a/sql/core/src/test/resources/sql-tests/results/subquery/negative-cases/invalid-correlation.sql.out
+++ b/sql/core/src/test/resources/sql-tests/results/subquery/negative-cases/invalid-correlation.sql.out
@@ -43,7 +43,13 @@ AND    t2b = (SELECT max(avg)
 struct<>
 -- !query output
 org.apache.spark.sql.AnalysisException
-grouping expressions sequence is empty, and 't2.t2b' is not an aggregate function. Wrap '(avg(t2.t2b) AS avg)' in windowing function(s) or wrap 't2.t2b' in first() (or first_value) if you don't care which value you get.
+{
+  "errorClass" : "_LEGACY_ERROR_TEMP_2422",
+  "messageParameters" : {
+    "aggExprs" : "(avg(t2.t2b) AS avg)",
+    "sqlExpr" : "t2.t2b"
+  }
+}
 
 
 -- !query
@@ -60,7 +66,12 @@ WHERE  t1a IN (SELECT   min(t2a)
 struct<>
 -- !query output
 org.apache.spark.sql.AnalysisException
-Resolved attribute(s) t2b#x missing from min(t2a)#x,t2c#x in operator !Filter t2c#x IN (list#x [t2b#x]).
+{
+  "errorClass" : "_LEGACY_ERROR_TEMP_2432",
+  "messageParameters" : {
+    "msg" : "Resolved attribute(s) t2b#x missing from min(t2a)#x,t2c#x in 
operator !Filter t2c#x IN (list#x [t2b#x])."
+  }
+}
 
 
 -- !query
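
The long run of `widenSetOperationTypes.sql.out` hunks that follows is a single mechanical rewrite: every incompatible-column-types message becomes `_LEGACY_ERROR_TEMP_2430`, parameterized by the operator (`Union`, `ExceptAll`, `IntersectAll`), the two type names, the ordinals of the offending column and table, and an empty `hint`. One representative case as a spark-shell sketch:

```
import org.apache.spark.sql.AnalysisException

try {
  // tinyint on one side, binary on the other: no common type for column 1.
  spark.sql("SELECT cast(1 AS tinyint) UNION SELECT cast('2' AS binary)").collect()
} catch {
  case e: AnalysisException =>
    println(e.getErrorClass) // expected: _LEGACY_ERROR_TEMP_2430
}
```
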
diff --git a/sql/core/src/test/resources/sql-tests/results/typeCoercion/native/widenSetOperationTypes.sql.out b/sql/core/src/test/resources/sql-tests/results/typeCoercion/native/widenSetOperationTypes.sql.out
index 34c46c1a2c0..260480d720e 100644
--- a/sql/core/src/test/resources/sql-tests/results/typeCoercion/native/widenSetOperationTypes.sql.out
+++ b/sql/core/src/test/resources/sql-tests/results/typeCoercion/native/widenSetOperationTypes.sql.out
@@ -85,7 +85,17 @@ SELECT cast(1 as tinyint) FROM t UNION SELECT cast('2' as binary) FROM t
 struct<>
 -- !query output
 org.apache.spark.sql.AnalysisException
-Union can only be performed on tables with compatible column types. The first column of the second table is binary type which is not compatible with tinyint at the same column of the first table
+{
+  "errorClass" : "_LEGACY_ERROR_TEMP_2430",
+  "messageParameters" : {
+    "ci" : "first",
+    "dt1" : "binary",
+    "dt2" : "tinyint",
+    "hint" : "",
+    "operator" : "Union",
+    "ti" : "second"
+  }
+}
 
 
 -- !query
@@ -94,7 +104,17 @@ SELECT cast(1 as tinyint) FROM t UNION SELECT cast(2 as boolean) FROM t
 struct<>
 -- !query output
 org.apache.spark.sql.AnalysisException
-Union can only be performed on tables with compatible column types. The first column of the second table is boolean type which is not compatible with tinyint at the same column of the first table
+{
+  "errorClass" : "_LEGACY_ERROR_TEMP_2430",
+  "messageParameters" : {
+    "ci" : "first",
+    "dt1" : "boolean",
+    "dt2" : "tinyint",
+    "hint" : "",
+    "operator" : "Union",
+    "ti" : "second"
+  }
+}
 
 
 -- !query
@@ -103,7 +123,17 @@ SELECT cast(1 as tinyint) FROM t UNION SELECT cast('2017-12-11 09:30:00.0' as ti
 struct<>
 -- !query output
 org.apache.spark.sql.AnalysisException
-Union can only be performed on tables with compatible column types. The first column of the second table is timestamp type which is not compatible with tinyint at the same column of the first table
+{
+  "errorClass" : "_LEGACY_ERROR_TEMP_2430",
+  "messageParameters" : {
+    "ci" : "first",
+    "dt1" : "timestamp",
+    "dt2" : "tinyint",
+    "hint" : "",
+    "operator" : "Union",
+    "ti" : "second"
+  }
+}
 
 
 -- !query
@@ -112,7 +142,17 @@ SELECT cast(1 as tinyint) FROM t UNION SELECT cast('2017-12-11 09:30:00' as date
 struct<>
 -- !query output
 org.apache.spark.sql.AnalysisException
-Union can only be performed on tables with compatible column types. The first column of the second table is date type which is not compatible with tinyint at the same column of the first table
+{
+  "errorClass" : "_LEGACY_ERROR_TEMP_2430",
+  "messageParameters" : {
+    "ci" : "first",
+    "dt1" : "date",
+    "dt2" : "tinyint",
+    "hint" : "",
+    "operator" : "Union",
+    "ti" : "second"
+  }
+}
 
 
 -- !query
@@ -193,7 +233,17 @@ SELECT cast(1 as smallint) FROM t UNION SELECT cast('2' as binary) FROM t
 struct<>
 -- !query output
 org.apache.spark.sql.AnalysisException
-Union can only be performed on tables with compatible column types. The first column of the second table is binary type which is not compatible with smallint at the same column of the first table
+{
+  "errorClass" : "_LEGACY_ERROR_TEMP_2430",
+  "messageParameters" : {
+    "ci" : "first",
+    "dt1" : "binary",
+    "dt2" : "smallint",
+    "hint" : "",
+    "operator" : "Union",
+    "ti" : "second"
+  }
+}
 
 
 -- !query
@@ -202,7 +252,17 @@ SELECT cast(1 as smallint) FROM t UNION SELECT cast(2 as boolean) FROM t
 struct<>
 -- !query output
 org.apache.spark.sql.AnalysisException
-Union can only be performed on tables with compatible column types. The first column of the second table is boolean type which is not compatible with smallint at the same column of the first table
+{
+  "errorClass" : "_LEGACY_ERROR_TEMP_2430",
+  "messageParameters" : {
+    "ci" : "first",
+    "dt1" : "boolean",
+    "dt2" : "smallint",
+    "hint" : "",
+    "operator" : "Union",
+    "ti" : "second"
+  }
+}
 
 
 -- !query
@@ -211,7 +271,17 @@ SELECT cast(1 as smallint) FROM t UNION SELECT cast('2017-12-11 09:30:00.0' as t
 struct<>
 -- !query output
 org.apache.spark.sql.AnalysisException
-Union can only be performed on tables with compatible column types. The first column of the second table is timestamp type which is not compatible with smallint at the same column of the first table
+{
+  "errorClass" : "_LEGACY_ERROR_TEMP_2430",
+  "messageParameters" : {
+    "ci" : "first",
+    "dt1" : "timestamp",
+    "dt2" : "smallint",
+    "hint" : "",
+    "operator" : "Union",
+    "ti" : "second"
+  }
+}
 
 
 -- !query
@@ -220,7 +290,17 @@ SELECT cast(1 as smallint) FROM t UNION SELECT cast('2017-12-11 09:30:00' as dat
 struct<>
 -- !query output
 org.apache.spark.sql.AnalysisException
-Union can only be performed on tables with compatible column types. The first column of the second table is date type which is not compatible with smallint at the same column of the first table
+{
+  "errorClass" : "_LEGACY_ERROR_TEMP_2430",
+  "messageParameters" : {
+    "ci" : "first",
+    "dt1" : "date",
+    "dt2" : "smallint",
+    "hint" : "",
+    "operator" : "Union",
+    "ti" : "second"
+  }
+}
 
 
 -- !query
@@ -301,7 +381,17 @@ SELECT cast(1 as int) FROM t UNION SELECT cast('2' as binary) FROM t
 struct<>
 -- !query output
 org.apache.spark.sql.AnalysisException
-Union can only be performed on tables with compatible column types. The first column of the second table is binary type which is not compatible with int at the same column of the first table
+{
+  "errorClass" : "_LEGACY_ERROR_TEMP_2430",
+  "messageParameters" : {
+    "ci" : "first",
+    "dt1" : "binary",
+    "dt2" : "int",
+    "hint" : "",
+    "operator" : "Union",
+    "ti" : "second"
+  }
+}
 
 
 -- !query
@@ -310,7 +400,17 @@ SELECT cast(1 as int) FROM t UNION SELECT cast(2 as boolean) FROM t
 struct<>
 -- !query output
 org.apache.spark.sql.AnalysisException
-Union can only be performed on tables with compatible column types. The first column of the second table is boolean type which is not compatible with int at the same column of the first table
+{
+  "errorClass" : "_LEGACY_ERROR_TEMP_2430",
+  "messageParameters" : {
+    "ci" : "first",
+    "dt1" : "boolean",
+    "dt2" : "int",
+    "hint" : "",
+    "operator" : "Union",
+    "ti" : "second"
+  }
+}
 
 
 -- !query
@@ -319,7 +419,17 @@ SELECT cast(1 as int) FROM t UNION SELECT cast('2017-12-11 09:30:00.0' as timest
 struct<>
 -- !query output
 org.apache.spark.sql.AnalysisException
-Union can only be performed on tables with compatible column types. The first column of the second table is timestamp type which is not compatible with int at the same column of the first table
+{
+  "errorClass" : "_LEGACY_ERROR_TEMP_2430",
+  "messageParameters" : {
+    "ci" : "first",
+    "dt1" : "timestamp",
+    "dt2" : "int",
+    "hint" : "",
+    "operator" : "Union",
+    "ti" : "second"
+  }
+}
 
 
 -- !query
@@ -328,7 +438,17 @@ SELECT cast(1 as int) FROM t UNION SELECT cast('2017-12-11 09:30:00' as date) FR
 struct<>
 -- !query output
 org.apache.spark.sql.AnalysisException
-Union can only be performed on tables with compatible column types. The first column of the second table is date type which is not compatible with int at the same column of the first table
+{
+  "errorClass" : "_LEGACY_ERROR_TEMP_2430",
+  "messageParameters" : {
+    "ci" : "first",
+    "dt1" : "date",
+    "dt2" : "int",
+    "hint" : "",
+    "operator" : "Union",
+    "ti" : "second"
+  }
+}
 
 
 -- !query
@@ -409,7 +529,17 @@ SELECT cast(1 as bigint) FROM t UNION SELECT cast('2' as binary) FROM t
 struct<>
 -- !query output
 org.apache.spark.sql.AnalysisException
-Union can only be performed on tables with compatible column types. The first column of the second table is binary type which is not compatible with bigint at the same column of the first table
+{
+  "errorClass" : "_LEGACY_ERROR_TEMP_2430",
+  "messageParameters" : {
+    "ci" : "first",
+    "dt1" : "binary",
+    "dt2" : "bigint",
+    "hint" : "",
+    "operator" : "Union",
+    "ti" : "second"
+  }
+}
 
 
 -- !query
@@ -418,7 +548,17 @@ SELECT cast(1 as bigint) FROM t UNION SELECT cast(2 as boolean) FROM t
 struct<>
 -- !query output
 org.apache.spark.sql.AnalysisException
-Union can only be performed on tables with compatible column types. The first column of the second table is boolean type which is not compatible with bigint at the same column of the first table
+{
+  "errorClass" : "_LEGACY_ERROR_TEMP_2430",
+  "messageParameters" : {
+    "ci" : "first",
+    "dt1" : "boolean",
+    "dt2" : "bigint",
+    "hint" : "",
+    "operator" : "Union",
+    "ti" : "second"
+  }
+}
 
 
 -- !query
@@ -427,7 +567,17 @@ SELECT cast(1 as bigint) FROM t UNION SELECT cast('2017-12-11 09:30:00.0' as tim
 struct<>
 -- !query output
 org.apache.spark.sql.AnalysisException
-Union can only be performed on tables with compatible column types. The first column of the second table is timestamp type which is not compatible with bigint at the same column of the first table
+{
+  "errorClass" : "_LEGACY_ERROR_TEMP_2430",
+  "messageParameters" : {
+    "ci" : "first",
+    "dt1" : "timestamp",
+    "dt2" : "bigint",
+    "hint" : "",
+    "operator" : "Union",
+    "ti" : "second"
+  }
+}
 
 
 -- !query
@@ -436,7 +586,17 @@ SELECT cast(1 as bigint) FROM t UNION SELECT cast('2017-12-11 09:30:00' as date)
 struct<>
 -- !query output
 org.apache.spark.sql.AnalysisException
-Union can only be performed on tables with compatible column types. The first column of the second table is date type which is not compatible with bigint at the same column of the first table
+{
+  "errorClass" : "_LEGACY_ERROR_TEMP_2430",
+  "messageParameters" : {
+    "ci" : "first",
+    "dt1" : "date",
+    "dt2" : "bigint",
+    "hint" : "",
+    "operator" : "Union",
+    "ti" : "second"
+  }
+}
 
 
 -- !query
@@ -517,7 +677,17 @@ SELECT cast(1 as float) FROM t UNION SELECT cast('2' as binary) FROM t
 struct<>
 -- !query output
 org.apache.spark.sql.AnalysisException
-Union can only be performed on tables with compatible column types. The first column of the second table is binary type which is not compatible with float at the same column of the first table
+{
+  "errorClass" : "_LEGACY_ERROR_TEMP_2430",
+  "messageParameters" : {
+    "ci" : "first",
+    "dt1" : "binary",
+    "dt2" : "float",
+    "hint" : "",
+    "operator" : "Union",
+    "ti" : "second"
+  }
+}
 
 
 -- !query
@@ -526,7 +696,17 @@ SELECT cast(1 as float) FROM t UNION SELECT cast(2 as boolean) FROM t
 struct<>
 -- !query output
 org.apache.spark.sql.AnalysisException
-Union can only be performed on tables with compatible column types. The first column of the second table is boolean type which is not compatible with float at the same column of the first table
+{
+  "errorClass" : "_LEGACY_ERROR_TEMP_2430",
+  "messageParameters" : {
+    "ci" : "first",
+    "dt1" : "boolean",
+    "dt2" : "float",
+    "hint" : "",
+    "operator" : "Union",
+    "ti" : "second"
+  }
+}
 
 
 -- !query
@@ -535,7 +715,17 @@ SELECT cast(1 as float) FROM t UNION SELECT cast('2017-12-11 09:30:00.0' as time
 struct<>
 -- !query output
 org.apache.spark.sql.AnalysisException
-Union can only be performed on tables with compatible column types. The first column of the second table is timestamp type which is not compatible with float at the same column of the first table
+{
+  "errorClass" : "_LEGACY_ERROR_TEMP_2430",
+  "messageParameters" : {
+    "ci" : "first",
+    "dt1" : "timestamp",
+    "dt2" : "float",
+    "hint" : "",
+    "operator" : "Union",
+    "ti" : "second"
+  }
+}
 
 
 -- !query
@@ -544,7 +734,17 @@ SELECT cast(1 as float) FROM t UNION SELECT cast('2017-12-11 09:30:00' as date)
 struct<>
 -- !query output
 org.apache.spark.sql.AnalysisException
-Union can only be performed on tables with compatible column types. The first column of the second table is date type which is not compatible with float at the same column of the first table
+{
+  "errorClass" : "_LEGACY_ERROR_TEMP_2430",
+  "messageParameters" : {
+    "ci" : "first",
+    "dt1" : "date",
+    "dt2" : "float",
+    "hint" : "",
+    "operator" : "Union",
+    "ti" : "second"
+  }
+}
 
 
 -- !query
@@ -625,7 +825,17 @@ SELECT cast(1 as double) FROM t UNION SELECT cast('2' as binary) FROM t
 struct<>
 -- !query output
 org.apache.spark.sql.AnalysisException
-Union can only be performed on tables with compatible column types. The first column of the second table is binary type which is not compatible with double at the same column of the first table
+{
+  "errorClass" : "_LEGACY_ERROR_TEMP_2430",
+  "messageParameters" : {
+    "ci" : "first",
+    "dt1" : "binary",
+    "dt2" : "double",
+    "hint" : "",
+    "operator" : "Union",
+    "ti" : "second"
+  }
+}
 
 
 -- !query
@@ -634,7 +844,17 @@ SELECT cast(1 as double) FROM t UNION SELECT cast(2 as boolean) FROM t
 struct<>
 -- !query output
 org.apache.spark.sql.AnalysisException
-Union can only be performed on tables with compatible column types. The first column of the second table is boolean type which is not compatible with double at the same column of the first table
+{
+  "errorClass" : "_LEGACY_ERROR_TEMP_2430",
+  "messageParameters" : {
+    "ci" : "first",
+    "dt1" : "boolean",
+    "dt2" : "double",
+    "hint" : "",
+    "operator" : "Union",
+    "ti" : "second"
+  }
+}
 
 
 -- !query
@@ -643,7 +863,17 @@ SELECT cast(1 as double) FROM t UNION SELECT cast('2017-12-11 09:30:00.0' as tim
 struct<>
 -- !query output
 org.apache.spark.sql.AnalysisException
-Union can only be performed on tables with compatible column types. The first column of the second table is timestamp type which is not compatible with double at the same column of the first table
+{
+  "errorClass" : "_LEGACY_ERROR_TEMP_2430",
+  "messageParameters" : {
+    "ci" : "first",
+    "dt1" : "timestamp",
+    "dt2" : "double",
+    "hint" : "",
+    "operator" : "Union",
+    "ti" : "second"
+  }
+}
 
 
 -- !query
@@ -652,7 +882,17 @@ SELECT cast(1 as double) FROM t UNION SELECT cast('2017-12-11 09:30:00' as date)
 struct<>
 -- !query output
 org.apache.spark.sql.AnalysisException
-Union can only be performed on tables with compatible column types. The first column of the second table is date type which is not compatible with double at the same column of the first table
+{
+  "errorClass" : "_LEGACY_ERROR_TEMP_2430",
+  "messageParameters" : {
+    "ci" : "first",
+    "dt1" : "date",
+    "dt2" : "double",
+    "hint" : "",
+    "operator" : "Union",
+    "ti" : "second"
+  }
+}
 
 
 -- !query
@@ -733,7 +973,17 @@ SELECT cast(1 as decimal(10, 0)) FROM t UNION SELECT cast('2' as binary) FROM t
 struct<>
 -- !query output
 org.apache.spark.sql.AnalysisException
-Union can only be performed on tables with compatible column types. The first column of the second table is binary type which is not compatible with decimal(10,0) at the same column of the first table
+{
+  "errorClass" : "_LEGACY_ERROR_TEMP_2430",
+  "messageParameters" : {
+    "ci" : "first",
+    "dt1" : "binary",
+    "dt2" : "decimal(10,0)",
+    "hint" : "",
+    "operator" : "Union",
+    "ti" : "second"
+  }
+}
 
 
 -- !query
@@ -742,7 +992,17 @@ SELECT cast(1 as decimal(10, 0)) FROM t UNION SELECT cast(2 as boolean) FROM t
 struct<>
 -- !query output
 org.apache.spark.sql.AnalysisException
-Union can only be performed on tables with compatible column types. The first column of the second table is boolean type which is not compatible with decimal(10,0) at the same column of the first table
+{
+  "errorClass" : "_LEGACY_ERROR_TEMP_2430",
+  "messageParameters" : {
+    "ci" : "first",
+    "dt1" : "boolean",
+    "dt2" : "decimal(10,0)",
+    "hint" : "",
+    "operator" : "Union",
+    "ti" : "second"
+  }
+}
 
 
 -- !query
@@ -751,7 +1011,17 @@ SELECT cast(1 as decimal(10, 0)) FROM t UNION SELECT cast('2017-12-11 09:30:00.0
 struct<>
 -- !query output
 org.apache.spark.sql.AnalysisException
-Union can only be performed on tables with compatible column types. The first column of the second table is timestamp type which is not compatible with decimal(10,0) at the same column of the first table
+{
+  "errorClass" : "_LEGACY_ERROR_TEMP_2430",
+  "messageParameters" : {
+    "ci" : "first",
+    "dt1" : "timestamp",
+    "dt2" : "decimal(10,0)",
+    "hint" : "",
+    "operator" : "Union",
+    "ti" : "second"
+  }
+}
 
 
 -- !query
@@ -760,7 +1030,17 @@ SELECT cast(1 as decimal(10, 0)) FROM t UNION SELECT cast('2017-12-11 09:30:00'
 struct<>
 -- !query output
 org.apache.spark.sql.AnalysisException
-Union can only be performed on tables with compatible column types. The first column of the second table is date type which is not compatible with decimal(10,0) at the same column of the first table
+{
+  "errorClass" : "_LEGACY_ERROR_TEMP_2430",
+  "messageParameters" : {
+    "ci" : "first",
+    "dt1" : "date",
+    "dt2" : "decimal(10,0)",
+    "hint" : "",
+    "operator" : "Union",
+    "ti" : "second"
+  }
+}
 
 
 -- !query
@@ -841,7 +1121,17 @@ SELECT cast(1 as string) FROM t UNION SELECT cast('2' as binary) FROM t
 struct<>
 -- !query output
 org.apache.spark.sql.AnalysisException
-Union can only be performed on tables with compatible column types. The first column of the second table is binary type which is not compatible with string at the same column of the first table
+{
+  "errorClass" : "_LEGACY_ERROR_TEMP_2430",
+  "messageParameters" : {
+    "ci" : "first",
+    "dt1" : "binary",
+    "dt2" : "string",
+    "hint" : "",
+    "operator" : "Union",
+    "ti" : "second"
+  }
+}
 
 
 -- !query
@@ -850,7 +1140,17 @@ SELECT cast(1 as string) FROM t UNION SELECT cast(2 as boolean) FROM t
 struct<>
 -- !query output
 org.apache.spark.sql.AnalysisException
-Union can only be performed on tables with compatible column types. The first column of the second table is boolean type which is not compatible with string at the same column of the first table
+{
+  "errorClass" : "_LEGACY_ERROR_TEMP_2430",
+  "messageParameters" : {
+    "ci" : "first",
+    "dt1" : "boolean",
+    "dt2" : "string",
+    "hint" : "",
+    "operator" : "Union",
+    "ti" : "second"
+  }
+}
 
 
 -- !query
@@ -877,7 +1177,17 @@ SELECT cast('1' as binary) FROM t UNION SELECT cast(2 as tinyint) FROM t
 struct<>
 -- !query output
 org.apache.spark.sql.AnalysisException
-Union can only be performed on tables with compatible column types. The first column of the second table is tinyint type which is not compatible with binary at the same column of the first table
+{
+  "errorClass" : "_LEGACY_ERROR_TEMP_2430",
+  "messageParameters" : {
+    "ci" : "first",
+    "dt1" : "tinyint",
+    "dt2" : "binary",
+    "hint" : "",
+    "operator" : "Union",
+    "ti" : "second"
+  }
+}
 
 
 -- !query
@@ -886,7 +1196,17 @@ SELECT cast('1' as binary) FROM t UNION SELECT cast(2 as smallint) FROM t
 struct<>
 -- !query output
 org.apache.spark.sql.AnalysisException
-Union can only be performed on tables with compatible column types. The first column of the second table is smallint type which is not compatible with binary at the same column of the first table
+{
+  "errorClass" : "_LEGACY_ERROR_TEMP_2430",
+  "messageParameters" : {
+    "ci" : "first",
+    "dt1" : "smallint",
+    "dt2" : "binary",
+    "hint" : "",
+    "operator" : "Union",
+    "ti" : "second"
+  }
+}
 
 
 -- !query
@@ -895,7 +1215,17 @@ SELECT cast('1' as binary) FROM t UNION SELECT cast(2 as int) FROM t
 struct<>
 -- !query output
 org.apache.spark.sql.AnalysisException
-Union can only be performed on tables with compatible column types. The first column of the second table is int type which is not compatible with binary at the same column of the first table
+{
+  "errorClass" : "_LEGACY_ERROR_TEMP_2430",
+  "messageParameters" : {
+    "ci" : "first",
+    "dt1" : "int",
+    "dt2" : "binary",
+    "hint" : "",
+    "operator" : "Union",
+    "ti" : "second"
+  }
+}
 
 
 -- !query
@@ -904,7 +1234,17 @@ SELECT cast('1' as binary) FROM t UNION SELECT cast(2 as bigint) FROM t
 struct<>
 -- !query output
 org.apache.spark.sql.AnalysisException
-Union can only be performed on tables with compatible column types. The first column of the second table is bigint type which is not compatible with binary at the same column of the first table
+{
+  "errorClass" : "_LEGACY_ERROR_TEMP_2430",
+  "messageParameters" : {
+    "ci" : "first",
+    "dt1" : "bigint",
+    "dt2" : "binary",
+    "hint" : "",
+    "operator" : "Union",
+    "ti" : "second"
+  }
+}
 
 
 -- !query
@@ -913,7 +1253,17 @@ SELECT cast('1' as binary) FROM t UNION SELECT cast(2 as float) FROM t
 struct<>
 -- !query output
 org.apache.spark.sql.AnalysisException
-Union can only be performed on tables with compatible column types. The first column of the second table is float type which is not compatible with binary at the same column of the first table
+{
+  "errorClass" : "_LEGACY_ERROR_TEMP_2430",
+  "messageParameters" : {
+    "ci" : "first",
+    "dt1" : "float",
+    "dt2" : "binary",
+    "hint" : "",
+    "operator" : "Union",
+    "ti" : "second"
+  }
+}
 
 
 -- !query
@@ -922,7 +1272,17 @@ SELECT cast('1' as binary) FROM t UNION SELECT cast(2 as double) FROM t
 struct<>
 -- !query output
 org.apache.spark.sql.AnalysisException
-Union can only be performed on tables with compatible column types. The first column of the second table is double type which is not compatible with binary at the same column of the first table
+{
+  "errorClass" : "_LEGACY_ERROR_TEMP_2430",
+  "messageParameters" : {
+    "ci" : "first",
+    "dt1" : "double",
+    "dt2" : "binary",
+    "hint" : "",
+    "operator" : "Union",
+    "ti" : "second"
+  }
+}
 
 
 -- !query
@@ -931,7 +1291,17 @@ SELECT cast('1' as binary) FROM t UNION SELECT cast(2 as decimal(10, 0)) FROM t
 struct<>
 -- !query output
 org.apache.spark.sql.AnalysisException
-Union can only be performed on tables with compatible column types. The first column of the second table is decimal(10,0) type which is not compatible with binary at the same column of the first table
+{
+  "errorClass" : "_LEGACY_ERROR_TEMP_2430",
+  "messageParameters" : {
+    "ci" : "first",
+    "dt1" : "decimal(10,0)",
+    "dt2" : "binary",
+    "hint" : "",
+    "operator" : "Union",
+    "ti" : "second"
+  }
+}
 
 
 -- !query
@@ -940,7 +1310,17 @@ SELECT cast('1' as binary) FROM t UNION SELECT cast(2 as string) FROM t
 struct<>
 -- !query output
 org.apache.spark.sql.AnalysisException
-Union can only be performed on tables with compatible column types. The first column of the second table is string type which is not compatible with binary at the same column of the first table
+{
+  "errorClass" : "_LEGACY_ERROR_TEMP_2430",
+  "messageParameters" : {
+    "ci" : "first",
+    "dt1" : "string",
+    "dt2" : "binary",
+    "hint" : "",
+    "operator" : "Union",
+    "ti" : "second"
+  }
+}
 
 
 -- !query
@@ -958,7 +1338,17 @@ SELECT cast('1' as binary) FROM t UNION SELECT cast(2 as boolean) FROM t
 struct<>
 -- !query output
 org.apache.spark.sql.AnalysisException
-Union can only be performed on tables with compatible column types. The first column of the second table is boolean type which is not compatible with binary at the same column of the first table
+{
+  "errorClass" : "_LEGACY_ERROR_TEMP_2430",
+  "messageParameters" : {
+    "ci" : "first",
+    "dt1" : "boolean",
+    "dt2" : "binary",
+    "hint" : "",
+    "operator" : "Union",
+    "ti" : "second"
+  }
+}
 
 
 -- !query
@@ -967,7 +1357,17 @@ SELECT cast('1' as binary) FROM t UNION SELECT cast('2017-12-11 09:30:00.0' as t
 struct<>
 -- !query output
 org.apache.spark.sql.AnalysisException
-Union can only be performed on tables with compatible column types. The first column of the second table is timestamp type which is not compatible with binary at the same column of the first table
+{
+  "errorClass" : "_LEGACY_ERROR_TEMP_2430",
+  "messageParameters" : {
+    "ci" : "first",
+    "dt1" : "timestamp",
+    "dt2" : "binary",
+    "hint" : "",
+    "operator" : "Union",
+    "ti" : "second"
+  }
+}
 
 
 -- !query
@@ -976,7 +1376,17 @@ SELECT cast('1' as binary) FROM t UNION SELECT cast('2017-12-11 09:30:00' as dat
 struct<>
 -- !query output
 org.apache.spark.sql.AnalysisException
-Union can only be performed on tables with compatible column types. The first column of the second table is date type which is not compatible with binary at the same column of the first table
+{
+  "errorClass" : "_LEGACY_ERROR_TEMP_2430",
+  "messageParameters" : {
+    "ci" : "first",
+    "dt1" : "date",
+    "dt2" : "binary",
+    "hint" : "",
+    "operator" : "Union",
+    "ti" : "second"
+  }
+}
 
 
 -- !query
@@ -985,7 +1395,17 @@ SELECT cast(1 as boolean) FROM t UNION SELECT cast(2 as tinyint) FROM t
 struct<>
 -- !query output
 org.apache.spark.sql.AnalysisException
-Union can only be performed on tables with compatible column types. The first column of the second table is tinyint type which is not compatible with boolean at the same column of the first table
+{
+  "errorClass" : "_LEGACY_ERROR_TEMP_2430",
+  "messageParameters" : {
+    "ci" : "first",
+    "dt1" : "tinyint",
+    "dt2" : "boolean",
+    "hint" : "",
+    "operator" : "Union",
+    "ti" : "second"
+  }
+}
 
 
 -- !query
@@ -994,7 +1414,17 @@ SELECT cast(1 as boolean) FROM t UNION SELECT cast(2 as smallint) FROM t
 struct<>
 -- !query output
 org.apache.spark.sql.AnalysisException
-Union can only be performed on tables with compatible column types. The first column of the second table is smallint type which is not compatible with boolean at the same column of the first table
+{
+  "errorClass" : "_LEGACY_ERROR_TEMP_2430",
+  "messageParameters" : {
+    "ci" : "first",
+    "dt1" : "smallint",
+    "dt2" : "boolean",
+    "hint" : "",
+    "operator" : "Union",
+    "ti" : "second"
+  }
+}
 
 
 -- !query
@@ -1003,7 +1433,17 @@ SELECT cast(1 as boolean) FROM t UNION SELECT cast(2 as int) FROM t
 struct<>
 -- !query output
 org.apache.spark.sql.AnalysisException
-Union can only be performed on tables with compatible column types. The first column of the second table is int type which is not compatible with boolean at the same column of the first table
+{
+  "errorClass" : "_LEGACY_ERROR_TEMP_2430",
+  "messageParameters" : {
+    "ci" : "first",
+    "dt1" : "int",
+    "dt2" : "boolean",
+    "hint" : "",
+    "operator" : "Union",
+    "ti" : "second"
+  }
+}
 
 
 -- !query
@@ -1012,7 +1452,17 @@ SELECT cast(1 as boolean) FROM t UNION SELECT cast(2 as bigint) FROM t
 struct<>
 -- !query output
 org.apache.spark.sql.AnalysisException
-Union can only be performed on tables with compatible column types. The first column of the second table is bigint type which is not compatible with boolean at the same column of the first table
+{
+  "errorClass" : "_LEGACY_ERROR_TEMP_2430",
+  "messageParameters" : {
+    "ci" : "first",
+    "dt1" : "bigint",
+    "dt2" : "boolean",
+    "hint" : "",
+    "operator" : "Union",
+    "ti" : "second"
+  }
+}
 
 
 -- !query
@@ -1021,7 +1471,17 @@ SELECT cast(1 as boolean) FROM t UNION SELECT cast(2 as float) FROM t
 struct<>
 -- !query output
 org.apache.spark.sql.AnalysisException
-Union can only be performed on tables with compatible column types. The first column of the second table is float type which is not compatible with boolean at the same column of the first table
+{
+  "errorClass" : "_LEGACY_ERROR_TEMP_2430",
+  "messageParameters" : {
+    "ci" : "first",
+    "dt1" : "float",
+    "dt2" : "boolean",
+    "hint" : "",
+    "operator" : "Union",
+    "ti" : "second"
+  }
+}
 
 
 -- !query
@@ -1030,7 +1490,17 @@ SELECT cast(1 as boolean) FROM t UNION SELECT cast(2 as double) FROM t
 struct<>
 -- !query output
 org.apache.spark.sql.AnalysisException
-Union can only be performed on tables with compatible column types. The first column of the second table is double type which is not compatible with boolean at the same column of the first table
+{
+  "errorClass" : "_LEGACY_ERROR_TEMP_2430",
+  "messageParameters" : {
+    "ci" : "first",
+    "dt1" : "double",
+    "dt2" : "boolean",
+    "hint" : "",
+    "operator" : "Union",
+    "ti" : "second"
+  }
+}
 
 
 -- !query
@@ -1039,7 +1509,17 @@ SELECT cast(1 as boolean) FROM t UNION SELECT cast(2 as decimal(10, 0)) FROM t
 struct<>
 -- !query output
 org.apache.spark.sql.AnalysisException
-Union can only be performed on tables with compatible column types. The first column of the second table is decimal(10,0) type which is not compatible with boolean at the same column of the first table
+{
+  "errorClass" : "_LEGACY_ERROR_TEMP_2430",
+  "messageParameters" : {
+    "ci" : "first",
+    "dt1" : "decimal(10,0)",
+    "dt2" : "boolean",
+    "hint" : "",
+    "operator" : "Union",
+    "ti" : "second"
+  }
+}
 
 
 -- !query
@@ -1048,7 +1528,17 @@ SELECT cast(1 as boolean) FROM t UNION SELECT cast(2 as string) FROM t
 struct<>
 -- !query output
 org.apache.spark.sql.AnalysisException
-Union can only be performed on tables with compatible column types. The first column of the second table is string type which is not compatible with boolean at the same column of the first table
+{
+  "errorClass" : "_LEGACY_ERROR_TEMP_2430",
+  "messageParameters" : {
+    "ci" : "first",
+    "dt1" : "string",
+    "dt2" : "boolean",
+    "hint" : "",
+    "operator" : "Union",
+    "ti" : "second"
+  }
+}
 
 
 -- !query
@@ -1057,7 +1547,17 @@ SELECT cast(1 as boolean) FROM t UNION SELECT cast('2' as binary) FROM t
 struct<>
 -- !query output
 org.apache.spark.sql.AnalysisException
-Union can only be performed on tables with compatible column types. The first column of the second table is binary type which is not compatible with boolean at the same column of the first table
+{
+  "errorClass" : "_LEGACY_ERROR_TEMP_2430",
+  "messageParameters" : {
+    "ci" : "first",
+    "dt1" : "binary",
+    "dt2" : "boolean",
+    "hint" : "",
+    "operator" : "Union",
+    "ti" : "second"
+  }
+}
 
 
 -- !query
@@ -1074,7 +1574,17 @@ SELECT cast(1 as boolean) FROM t UNION SELECT cast('2017-12-11 09:30:00.0' as ti
 struct<>
 -- !query output
 org.apache.spark.sql.AnalysisException
-Union can only be performed on tables with compatible column types. The first column of the second table is timestamp type which is not compatible with boolean at the same column of the first table
+{
+  "errorClass" : "_LEGACY_ERROR_TEMP_2430",
+  "messageParameters" : {
+    "ci" : "first",
+    "dt1" : "timestamp",
+    "dt2" : "boolean",
+    "hint" : "",
+    "operator" : "Union",
+    "ti" : "second"
+  }
+}
 
 
 -- !query
@@ -1083,7 +1593,17 @@ SELECT cast(1 as boolean) FROM t UNION SELECT cast('2017-12-11 09:30:00' as date
 struct<>
 -- !query output
 org.apache.spark.sql.AnalysisException
-Union can only be performed on tables with compatible column types. The first column of the second table is date type which is not compatible with boolean at the same column of the first table
+{
+  "errorClass" : "_LEGACY_ERROR_TEMP_2430",
+  "messageParameters" : {
+    "ci" : "first",
+    "dt1" : "date",
+    "dt2" : "boolean",
+    "hint" : "",
+    "operator" : "Union",
+    "ti" : "second"
+  }
+}
 
 
 -- !query
@@ -1092,7 +1612,17 @@ SELECT cast('2017-12-12 09:30:00.0' as timestamp) FROM t UNION SELECT cast(2 as
 struct<>
 -- !query output
 org.apache.spark.sql.AnalysisException
-Union can only be performed on tables with compatible column types. The first column of the second table is tinyint type which is not compatible with timestamp at the same column of the first table
+{
+  "errorClass" : "_LEGACY_ERROR_TEMP_2430",
+  "messageParameters" : {
+    "ci" : "first",
+    "dt1" : "tinyint",
+    "dt2" : "timestamp",
+    "hint" : "",
+    "operator" : "Union",
+    "ti" : "second"
+  }
+}
 
 
 -- !query
@@ -1101,7 +1631,17 @@ SELECT cast('2017-12-12 09:30:00.0' as timestamp) FROM t UNION SELECT cast(2 as
 struct<>
 -- !query output
 org.apache.spark.sql.AnalysisException
-Union can only be performed on tables with compatible column types. The first column of the second table is smallint type which is not compatible with timestamp at the same column of the first table
+{
+  "errorClass" : "_LEGACY_ERROR_TEMP_2430",
+  "messageParameters" : {
+    "ci" : "first",
+    "dt1" : "smallint",
+    "dt2" : "timestamp",
+    "hint" : "",
+    "operator" : "Union",
+    "ti" : "second"
+  }
+}
 
 
 -- !query
@@ -1110,7 +1650,17 @@ SELECT cast('2017-12-12 09:30:00.0' as timestamp) FROM t UNION SELECT cast(2 as
 struct<>
 -- !query output
 org.apache.spark.sql.AnalysisException
-Union can only be performed on tables with compatible column types. The first column of the second table is int type which is not compatible with timestamp at the same column of the first table
+{
+  "errorClass" : "_LEGACY_ERROR_TEMP_2430",
+  "messageParameters" : {
+    "ci" : "first",
+    "dt1" : "int",
+    "dt2" : "timestamp",
+    "hint" : "",
+    "operator" : "Union",
+    "ti" : "second"
+  }
+}
 
 
 -- !query
@@ -1119,7 +1669,17 @@ SELECT cast('2017-12-12 09:30:00.0' as timestamp) FROM t UNION SELECT cast(2 as
 struct<>
 -- !query output
 org.apache.spark.sql.AnalysisException
-Union can only be performed on tables with compatible column types. The first column of the second table is bigint type which is not compatible with timestamp at the same column of the first table
+{
+  "errorClass" : "_LEGACY_ERROR_TEMP_2430",
+  "messageParameters" : {
+    "ci" : "first",
+    "dt1" : "bigint",
+    "dt2" : "timestamp",
+    "hint" : "",
+    "operator" : "Union",
+    "ti" : "second"
+  }
+}
 
 
 -- !query
@@ -1128,7 +1688,17 @@ SELECT cast('2017-12-12 09:30:00.0' as timestamp) FROM t UNION SELECT cast(2 as
 struct<>
 -- !query output
 org.apache.spark.sql.AnalysisException
-Union can only be performed on tables with compatible column types. The first column of the second table is float type which is not compatible with timestamp at the same column of the first table
+{
+  "errorClass" : "_LEGACY_ERROR_TEMP_2430",
+  "messageParameters" : {
+    "ci" : "first",
+    "dt1" : "float",
+    "dt2" : "timestamp",
+    "hint" : "",
+    "operator" : "Union",
+    "ti" : "second"
+  }
+}
 
 
 -- !query
@@ -1137,7 +1707,17 @@ SELECT cast('2017-12-12 09:30:00.0' as timestamp) FROM t UNION SELECT cast(2 as
 struct<>
 -- !query output
 org.apache.spark.sql.AnalysisException
-Union can only be performed on tables with compatible column types. The first column of the second table is double type which is not compatible with timestamp at the same column of the first table
+{
+  "errorClass" : "_LEGACY_ERROR_TEMP_2430",
+  "messageParameters" : {
+    "ci" : "first",
+    "dt1" : "double",
+    "dt2" : "timestamp",
+    "hint" : "",
+    "operator" : "Union",
+    "ti" : "second"
+  }
+}
 
 
 -- !query
@@ -1146,7 +1726,17 @@ SELECT cast('2017-12-12 09:30:00.0' as timestamp) FROM t UNION SELECT cast(2 as
 struct<>
 -- !query output
 org.apache.spark.sql.AnalysisException
-Union can only be performed on tables with compatible column types. The first column of the second table is decimal(10,0) type which is not compatible with timestamp at the same column of the first table
+{
+  "errorClass" : "_LEGACY_ERROR_TEMP_2430",
+  "messageParameters" : {
+    "ci" : "first",
+    "dt1" : "decimal(10,0)",
+    "dt2" : "timestamp",
+    "hint" : "",
+    "operator" : "Union",
+    "ti" : "second"
+  }
+}
 
 
 -- !query
@@ -1164,7 +1754,17 @@ SELECT cast('2017-12-12 09:30:00.0' as timestamp) FROM t UNION SELECT cast('2' a
 struct<>
 -- !query output
 org.apache.spark.sql.AnalysisException
-Union can only be performed on tables with compatible column types. The first column of the second table is binary type which is not compatible with timestamp at the same column of the first table
+{
+  "errorClass" : "_LEGACY_ERROR_TEMP_2430",
+  "messageParameters" : {
+    "ci" : "first",
+    "dt1" : "binary",
+    "dt2" : "timestamp",
+    "hint" : "",
+    "operator" : "Union",
+    "ti" : "second"
+  }
+}
 
 
 -- !query
@@ -1173,7 +1773,17 @@ SELECT cast('2017-12-12 09:30:00.0' as timestamp) FROM t UNION SELECT cast(2 as
 struct<>
 -- !query output
 org.apache.spark.sql.AnalysisException
-Union can only be performed on tables with compatible column types. The first column of the second table is boolean type which is not compatible with timestamp at the same column of the first table
+{
+  "errorClass" : "_LEGACY_ERROR_TEMP_2430",
+  "messageParameters" : {
+    "ci" : "first",
+    "dt1" : "boolean",
+    "dt2" : "timestamp",
+    "hint" : "",
+    "operator" : "Union",
+    "ti" : "second"
+  }
+}
 
 
 -- !query
@@ -1200,7 +1810,17 @@ SELECT cast('2017-12-12 09:30:00' as date) FROM t UNION SELECT cast(2 as tinyint
 struct<>
 -- !query output
 org.apache.spark.sql.AnalysisException
-Union can only be performed on tables with compatible column types. The first column of the second table is tinyint type which is not compatible with date at the same column of the first table
+{
+  "errorClass" : "_LEGACY_ERROR_TEMP_2430",
+  "messageParameters" : {
+    "ci" : "first",
+    "dt1" : "tinyint",
+    "dt2" : "date",
+    "hint" : "",
+    "operator" : "Union",
+    "ti" : "second"
+  }
+}
 
 
 -- !query
@@ -1209,7 +1829,17 @@ SELECT cast('2017-12-12 09:30:00' as date) FROM t UNION SELECT cast(2 as smallin
 struct<>
 -- !query output
 org.apache.spark.sql.AnalysisException
-Union can only be performed on tables with compatible column types. The first column of the second table is smallint type which is not compatible with date at the same column of the first table
+{
+  "errorClass" : "_LEGACY_ERROR_TEMP_2430",
+  "messageParameters" : {
+    "ci" : "first",
+    "dt1" : "smallint",
+    "dt2" : "date",
+    "hint" : "",
+    "operator" : "Union",
+    "ti" : "second"
+  }
+}
 
 
 -- !query
@@ -1218,7 +1848,17 @@ SELECT cast('2017-12-12 09:30:00' as date) FROM t UNION SELECT cast(2 as int) FR
 struct<>
 -- !query output
 org.apache.spark.sql.AnalysisException
-Union can only be performed on tables with compatible column types. The first column of the second table is int type which is not compatible with date at the same column of the first table
+{
+  "errorClass" : "_LEGACY_ERROR_TEMP_2430",
+  "messageParameters" : {
+    "ci" : "first",
+    "dt1" : "int",
+    "dt2" : "date",
+    "hint" : "",
+    "operator" : "Union",
+    "ti" : "second"
+  }
+}
 
 
 -- !query
@@ -1227,7 +1867,17 @@ SELECT cast('2017-12-12 09:30:00' as date) FROM t UNION SELECT cast(2 as bigint)
 struct<>
 -- !query output
 org.apache.spark.sql.AnalysisException
-Union can only be performed on tables with compatible column types. The first column of the second table is bigint type which is not compatible with date at the same column of the first table
+{
+  "errorClass" : "_LEGACY_ERROR_TEMP_2430",
+  "messageParameters" : {
+    "ci" : "first",
+    "dt1" : "bigint",
+    "dt2" : "date",
+    "hint" : "",
+    "operator" : "Union",
+    "ti" : "second"
+  }
+}
 
 
 -- !query
@@ -1236,7 +1886,17 @@ SELECT cast('2017-12-12 09:30:00' as date) FROM t UNION SELECT cast(2 as float)
 struct<>
 -- !query output
 org.apache.spark.sql.AnalysisException
-Union can only be performed on tables with compatible column types. The first column of the second table is float type which is not compatible with date at the same column of the first table
+{
+  "errorClass" : "_LEGACY_ERROR_TEMP_2430",
+  "messageParameters" : {
+    "ci" : "first",
+    "dt1" : "float",
+    "dt2" : "date",
+    "hint" : "",
+    "operator" : "Union",
+    "ti" : "second"
+  }
+}
 
 
 -- !query
@@ -1245,7 +1905,17 @@ SELECT cast('2017-12-12 09:30:00' as date) FROM t UNION SELECT cast(2 as double)
 struct<>
 -- !query output
 org.apache.spark.sql.AnalysisException
-Union can only be performed on tables with compatible column types. The first column of the second table is double type which is not compatible with date at the same column of the first table
+{
+  "errorClass" : "_LEGACY_ERROR_TEMP_2430",
+  "messageParameters" : {
+    "ci" : "first",
+    "dt1" : "double",
+    "dt2" : "date",
+    "hint" : "",
+    "operator" : "Union",
+    "ti" : "second"
+  }
+}
 
 
 -- !query
@@ -1254,7 +1924,17 @@ SELECT cast('2017-12-12 09:30:00' as date) FROM t UNION SELECT cast(2 as decimal
 struct<>
 -- !query output
 org.apache.spark.sql.AnalysisException
-Union can only be performed on tables with compatible column types. The first column of the second table is decimal(10,0) type which is not compatible with date at the same column of the first table
+{
+  "errorClass" : "_LEGACY_ERROR_TEMP_2430",
+  "messageParameters" : {
+    "ci" : "first",
+    "dt1" : "decimal(10,0)",
+    "dt2" : "date",
+    "hint" : "",
+    "operator" : "Union",
+    "ti" : "second"
+  }
+}
 
 
 -- !query
@@ -1272,7 +1952,17 @@ SELECT cast('2017-12-12 09:30:00' as date) FROM t UNION SELECT cast('2' as binar
 struct<>
 -- !query output
 org.apache.spark.sql.AnalysisException
-Union can only be performed on tables with compatible column types. The first column of the second table is binary type which is not compatible with date at the same column of the first table
+{
+  "errorClass" : "_LEGACY_ERROR_TEMP_2430",
+  "messageParameters" : {
+    "ci" : "first",
+    "dt1" : "binary",
+    "dt2" : "date",
+    "hint" : "",
+    "operator" : "Union",
+    "ti" : "second"
+  }
+}
 
 
 -- !query
@@ -1281,7 +1971,17 @@ SELECT cast('2017-12-12 09:30:00' as date) FROM t UNION SELECT cast(2 as boolean
 struct<>
 -- !query output
 org.apache.spark.sql.AnalysisException
-Union can only be performed on tables with compatible column types. The first column of the second table is boolean type which is not compatible with date at the same column of the first table
+{
+  "errorClass" : "_LEGACY_ERROR_TEMP_2430",
+  "messageParameters" : {
+    "ci" : "first",
+    "dt1" : "boolean",
+    "dt2" : "date",
+    "hint" : "",
+    "operator" : "Union",
+    "ti" : "second"
+  }
+}
 
 
 -- !query
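
The udaf and udf golden files below reuse `_LEGACY_ERROR_TEMP_2422` for the "grouping expressions sequence is empty" failure; only `aggExprs` and `sqlExpr` vary per query. The underlying mistake, sketched in spark-shell:

```
import org.apache.spark.sql.AnalysisException

try {
  // A plain column next to an aggregate, with no GROUP BY clause.
  spark.sql("SELECT id, count(1) FROM range(10)").collect()
} catch {
  case e: AnalysisException =>
    println(e.getErrorClass) // expected: _LEGACY_ERROR_TEMP_2422
}
```
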
diff --git a/sql/core/src/test/resources/sql-tests/results/udaf/udaf-group-by-ordinal.sql.out b/sql/core/src/test/resources/sql-tests/results/udaf/udaf-group-by-ordinal.sql.out
index d6e3f111001..f9428ef8405 100644
--- a/sql/core/src/test/resources/sql-tests/results/udaf/udaf-group-by-ordinal.sql.out
+++ b/sql/core/src/test/resources/sql-tests/results/udaf/udaf-group-by-ordinal.sql.out
@@ -386,7 +386,13 @@ select a, b, udaf(1) from data group by cube(1, 3)
 struct<>
 -- !query output
 org.apache.spark.sql.AnalysisException
-grouping expressions sequence is empty, and 'data.a' is not an aggregate function. Wrap '()' in windowing function(s) or wrap 'data.a' in first() (or first_value) if you don't care which value you get.
+{
+  "errorClass" : "_LEGACY_ERROR_TEMP_2422",
+  "messageParameters" : {
+    "aggExprs" : "()",
+    "sqlExpr" : "data.a"
+  }
+}
 
 
 -- !query
diff --git a/sql/core/src/test/resources/sql-tests/results/udaf/udaf-group-by.sql.out b/sql/core/src/test/resources/sql-tests/results/udaf/udaf-group-by.sql.out
index ec8819abfee..bc65c1d471a 100644
--- a/sql/core/src/test/resources/sql-tests/results/udaf/udaf-group-by.sql.out
+++ b/sql/core/src/test/resources/sql-tests/results/udaf/udaf-group-by.sql.out
@@ -15,7 +15,13 @@ SELECT a, udaf(b) FROM testData
 struct<>
 -- !query output
 org.apache.spark.sql.AnalysisException
-grouping expressions sequence is empty, and 'testdata.a' is not an aggregate function. Wrap '()' in windowing function(s) or wrap 'testdata.a' in first() (or first_value) if you don't care which value you get.
+{
+  "errorClass" : "_LEGACY_ERROR_TEMP_2422",
+  "messageParameters" : {
+    "aggExprs" : "()",
+    "sqlExpr" : "testdata.a"
+  }
+}
 
 
 -- !query
diff --git a/sql/core/src/test/resources/sql-tests/results/udf/postgreSQL/udf-aggregates_part3.sql.out b/sql/core/src/test/resources/sql-tests/results/udf/postgreSQL/udf-aggregates_part3.sql.out
index f44b69eaadc..d1440c86f46 100644
--- a/sql/core/src/test/resources/sql-tests/results/udf/postgreSQL/udf-aggregates_part3.sql.out
+++ b/sql/core/src/test/resources/sql-tests/results/udf/postgreSQL/udf-aggregates_part3.sql.out
@@ -5,7 +5,9 @@ select udf(max(min(unique1))) from tenk1
 struct<>
 -- !query output
 org.apache.spark.sql.AnalysisException
-It is not allowed to use an aggregate function in the argument of another aggregate function. Please use the inner aggregate function in a sub-query.
+{
+  "errorClass" : "_LEGACY_ERROR_TEMP_2420"
+}
 
 
 -- !query
diff --git a/sql/core/src/test/resources/sql-tests/results/udf/postgreSQL/udf-select_having.sql.out b/sql/core/src/test/resources/sql-tests/results/udf/postgreSQL/udf-select_having.sql.out
index 03fa37574e3..3ca153579e3 100644
--- a/sql/core/src/test/resources/sql-tests/results/udf/postgreSQL/udf-select_having.sql.out
+++ b/sql/core/src/test/resources/sql-tests/results/udf/postgreSQL/udf-select_having.sql.out
@@ -140,7 +140,13 @@ SELECT udf(a) FROM test_having HAVING udf(min(a)) < udf(max(a))
 struct<>
 -- !query output
 org.apache.spark.sql.AnalysisException
-grouping expressions sequence is empty, and 'spark_catalog.default.test_having.a' is not an aggregate function. Wrap '(min(spark_catalog.default.test_having.a) AS `min(a#x)`, max(spark_catalog.default.test_having.a) AS `max(a#x)`)' in windowing function(s) or wrap 'spark_catalog.default.test_having.a' in first() (or first_value) if you don't care which value you get.
+{
+  "errorClass" : "_LEGACY_ERROR_TEMP_2422",
+  "messageParameters" : {
+    "aggExprs" : "(min(spark_catalog.default.test_having.a) AS `min(a#x)`, 
max(spark_catalog.default.test_having.a) AS `max(a#x)`)",
+    "sqlExpr" : "spark_catalog.default.test_having.a"
+  }
+}
 
 
 -- !query
diff --git a/sql/core/src/test/resources/sql-tests/results/udf/udf-except-all.sql.out b/sql/core/src/test/resources/sql-tests/results/udf/udf-except-all.sql.out
index cb125a648b7..b6796be1a71 100644
--- a/sql/core/src/test/resources/sql-tests/results/udf/udf-except-all.sql.out
+++ b/sql/core/src/test/resources/sql-tests/results/udf/udf-except-all.sql.out
@@ -138,7 +138,17 @@ SELECT array(1)
 struct<>
 -- !query output
 org.apache.spark.sql.AnalysisException
-ExceptAll can only be performed on tables with compatible column types. The first column of the second table is array<int> type which is not compatible with int at the same column of the first table
+{
+  "errorClass" : "_LEGACY_ERROR_TEMP_2430",
+  "messageParameters" : {
+    "ci" : "first",
+    "dt1" : "array<int>",
+    "dt2" : "int",
+    "hint" : "",
+    "operator" : "ExceptAll",
+    "ti" : "second"
+  }
+}
 
 
 -- !query
@@ -210,7 +220,15 @@ SELECT k, v FROM tab4
 struct<>
 -- !query output
 org.apache.spark.sql.AnalysisException
-ExceptAll can only be performed on tables with the same number of columns, but the first table has 1 columns and the second table has 2 columns
+{
+  "errorClass" : "_LEGACY_ERROR_TEMP_2429",
+  "messageParameters" : {
+    "firstColNum" : "1",
+    "nColNum" : "2",
+    "nTab" : "second",
+    "operator" : "ExceptAll"
+  }
+}
 
 
 -- !query
diff --git a/sql/core/src/test/resources/sql-tests/results/udf/udf-group-analytics.sql.out b/sql/core/src/test/resources/sql-tests/results/udf/udf-group-analytics.sql.out
index 4a8df762c73..c78727509bb 100644
--- a/sql/core/src/test/resources/sql-tests/results/udf/udf-group-analytics.sql.out
+++ b/sql/core/src/test/resources/sql-tests/results/udf/udf-group-analytics.sql.out
@@ -207,7 +207,9 @@ SELECT course, udf(year), GROUPING(course) FROM courseSales GROUP BY course, udf
 struct<>
 -- !query output
 org.apache.spark.sql.AnalysisException
-grouping() can only be used with GroupingSets/Cube/Rollup
+{
+  "errorClass" : "_LEGACY_ERROR_TEMP_2445"
+}
 
 
 -- !query
@@ -216,7 +218,9 @@ SELECT course, udf(year), GROUPING_ID(course, year) FROM courseSales GROUP BY ud
 struct<>
 -- !query output
 org.apache.spark.sql.AnalysisException
-grouping_id() can only be used with GroupingSets/Cube/Rollup
+{
+  "errorClass" : "_LEGACY_ERROR_TEMP_2407"
+}
 
 
 -- !query
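
The two `udf-group-analytics.sql.out` hunks above show that `grouping()` and `grouping_id()` misuse map to separate parameterless classes, `_LEGACY_ERROR_TEMP_2445` and `_LEGACY_ERROR_TEMP_2407`. A sketch of the `grouping()` case:

```
import org.apache.spark.sql.AnalysisException

try {
  // grouping() is only valid together with GROUPING SETS/CUBE/ROLLUP.
  spark.sql("SELECT grouping(id) FROM range(10) GROUP BY id").collect()
} catch {
  case e: AnalysisException =>
    println(e.getErrorClass) // expected: _LEGACY_ERROR_TEMP_2445
}
```
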
diff --git a/sql/core/src/test/resources/sql-tests/results/udf/udf-group-by.sql.out b/sql/core/src/test/resources/sql-tests/results/udf/udf-group-by.sql.out
index 9c96f49cdc0..b075240bb19 100644
--- a/sql/core/src/test/resources/sql-tests/results/udf/udf-group-by.sql.out
+++ b/sql/core/src/test/resources/sql-tests/results/udf/udf-group-by.sql.out
@@ -15,7 +15,13 @@ SELECT udf(a), udf(COUNT(b)) FROM testData
 struct<>
 -- !query output
 org.apache.spark.sql.AnalysisException
-grouping expressions sequence is empty, and 'testdata.a' is not an aggregate function. Wrap '(CAST(udf(cast(count(b) as string)) AS BIGINT) AS `udf(count(b))`)' in windowing function(s) or wrap 'testdata.a' in first() (or first_value) if you don't care which value you get.
+{
+  "errorClass" : "_LEGACY_ERROR_TEMP_2422",
+  "messageParameters" : {
+    "aggExprs" : "(CAST(udf(cast(count(b) as string)) AS BIGINT) AS 
`udf(count(b))`)",
+    "sqlExpr" : "testdata.a"
+  }
+}
 
 
 -- !query
@@ -176,7 +182,12 @@ SELECT udf(COUNT(b)) AS k FROM testData GROUP BY k
 struct<>
 -- !query output
 org.apache.spark.sql.AnalysisException
-aggregate functions are not allowed in GROUP BY, but found CAST(udf(cast(count(b) as string)) AS BIGINT)
+{
+  "errorClass" : "_LEGACY_ERROR_TEMP_2424",
+  "messageParameters" : {
+    "sqlExpr" : "CAST(udf(cast(count(b) as string)) AS BIGINT)"
+  }
+}
 
 
 -- !query
@@ -303,7 +314,13 @@ SELECT udf(id) FROM range(10) HAVING id > 0
 struct<>
 -- !query output
 org.apache.spark.sql.AnalysisException
-grouping expressions sequence is empty, and 'id' is not an aggregate function. Wrap '()' in windowing function(s) or wrap 'id' in first() (or first_value) if you don't care which value you get.
+{
+  "errorClass" : "_LEGACY_ERROR_TEMP_2422",
+  "messageParameters" : {
+    "aggExprs" : "()",
+    "sqlExpr" : "id"
+  }
+}
 
 
 -- !query
@@ -586,10 +603,13 @@ SELECT udf(count(*)) FROM test_agg WHERE count(*) > 1L
 struct<>
 -- !query output
 org.apache.spark.sql.AnalysisException
-
-Aggregate/Window/Generate expressions are not valid in where clause of the query.
-Expression in where clause: [(count(1) > 1L)]
-Invalid expressions: [count(1)]
+{
+  "errorClass" : "_LEGACY_ERROR_TEMP_2440",
+  "messageParameters" : {
+    "condition" : "(count(1) > 1L)",
+    "invalidExprSqls" : "count(1)"
+  }
+}
 
 
 -- !query
@@ -598,10 +618,13 @@ SELECT udf(count(*)) FROM test_agg WHERE count(*) + 1L > 1L
 struct<>
 -- !query output
 org.apache.spark.sql.AnalysisException
-
-Aggregate/Window/Generate expressions are not valid in where clause of the query.
-Expression in where clause: [((count(1) + 1L) > 1L)]
-Invalid expressions: [count(1)]
+{
+  "errorClass" : "_LEGACY_ERROR_TEMP_2440",
+  "messageParameters" : {
+    "condition" : "((count(1) + 1L) > 1L)",
+    "invalidExprSqls" : "count(1)"
+  }
+}
 
 
 -- !query
@@ -610,7 +633,10 @@ SELECT udf(count(*)) FROM test_agg WHERE k = 1 or k = 2 or count(*) + 1L > 1L or
 struct<>
 -- !query output
 org.apache.spark.sql.AnalysisException
-
-Aggregate/Window/Generate expressions are not valid in where clause of the query.
-Expression in where clause: [(((test_agg.k = 1) OR (test_agg.k = 2)) OR (((count(1) + 1L) > 1L) OR (max(test_agg.k) > 1)))]
-Invalid expressions: [count(1), max(test_agg.k)]
+{
+  "errorClass" : "_LEGACY_ERROR_TEMP_2440",
+  "messageParameters" : {
+    "condition" : "(((test_agg.k = 1) OR (test_agg.k = 2)) OR (((count(1) + 
1L) > 1L) OR (max(test_agg.k) > 1)))",
+    "invalidExprSqls" : "count(1), max(test_agg.k)"
+  }
+}
diff --git a/sql/core/src/test/resources/sql-tests/results/udf/udf-intersect-all.sql.out b/sql/core/src/test/resources/sql-tests/results/udf/udf-intersect-all.sql.out
index 68a9e11bd23..a838fbc83cb 100644
--- a/sql/core/src/test/resources/sql-tests/results/udf/udf-intersect-all.sql.out
+++ b/sql/core/src/test/resources/sql-tests/results/udf/udf-intersect-all.sql.out
@@ -95,7 +95,17 @@ SELECT array(1), udf(2)
 struct<>
 -- !query output
 org.apache.spark.sql.AnalysisException
-IntersectAll can only be performed on tables with compatible column types. The first column of the second table is array<int> type which is not compatible with int at the same column of the first table
+{
+  "errorClass" : "_LEGACY_ERROR_TEMP_2430",
+  "messageParameters" : {
+    "ci" : "first",
+    "dt1" : "array<int>",
+    "dt2" : "int",
+    "hint" : "",
+    "operator" : "IntersectAll",
+    "ti" : "second"
+  }
+}
 
 
 -- !query
@@ -106,7 +116,15 @@ SELECT udf(k), udf(v) FROM tab2
 struct<>
 -- !query output
 org.apache.spark.sql.AnalysisException
-IntersectAll can only be performed on tables with the same number of columns, but the first table has 1 columns and the second table has 2 columns
+{
+  "errorClass" : "_LEGACY_ERROR_TEMP_2429",
+  "messageParameters" : {
+    "firstColNum" : "1",
+    "nColNum" : "2",
+    "nTab" : "second",
+    "operator" : "IntersectAll"
+  }
+}
 
 
 -- !query
diff --git a/sql/core/src/test/resources/sql-tests/results/udf/udf-pivot.sql.out b/sql/core/src/test/resources/sql-tests/results/udf/udf-pivot.sql.out
index 23e6a4792bc..a48b58d5202 100644
--- a/sql/core/src/test/resources/sql-tests/results/udf/udf-pivot.sql.out
+++ b/sql/core/src/test/resources/sql-tests/results/udf/udf-pivot.sql.out
@@ -283,7 +283,9 @@ PIVOT (
 struct<>
 -- !query output
 org.apache.spark.sql.AnalysisException
-It is not allowed to use an aggregate function in the argument of another aggregate function. Please use the inner aggregate function in a sub-query.
+{
+  "errorClass" : "_LEGACY_ERROR_TEMP_2420"
+}
 
 
 -- !query


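The golden files above now record each failure as JSON with an `errorClass` and optional `messageParameters`. A minimal sketch of reading that class at runtime, assuming the Spark 3.4-era API in which `AnalysisException` implements `SparkThrowable`; the query below merely mirrors the `ExceptAll` type-mismatch case from the migrated tests:

```
import org.apache.spark.sql.{AnalysisException, SparkSession}

// Illustrative local session only.
val spark = SparkSession.builder().master("local[*]").getOrCreate()

try {
  // Incompatible column types in EXCEPT ALL, analogous to the test case above;
  // analysis fails eagerly when the query is parsed and analyzed.
  spark.sql("SELECT array(1) EXCEPT ALL SELECT 1")
} catch {
  case e: AnalysisException =>
    // Expected to print "_LEGACY_ERROR_TEMP_2430" at this commit, while
    // e.getMessage still renders only the legacy text.
    println(e.getErrorClass)
}
```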