This is an automated email from the ASF dual-hosted git repository.

maxgekk pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/spark.git


The following commit(s) were added to refs/heads/master by this push:
     new f352f103ed5 [SPARK-41573][SQL] Assign name to _LEGACY_ERROR_TEMP_2136
f352f103ed5 is described below

commit f352f103ed512806abb3f642571a0c595b8b0509
Author: itholic <haejoon....@databricks.com>
AuthorDate: Thu Jan 5 00:21:32 2023 +0500

    [SPARK-41573][SQL] Assign name to _LEGACY_ERROR_TEMP_2136
    
    ### What changes were proposed in this pull request?
    
    This PR proposes to assign the name "CANNOT_PARSE_JSON_FIELD" to _LEGACY_ERROR_TEMP_2136.
    
    ### Why are the changes needed?
    
    We should assign a proper name to each _LEGACY_ERROR_TEMP_* error class.
    
    ### Does this PR introduce _any_ user-facing change?
    
    No
    
    ### How was this patch tested?
    
    `./build/sbt "sql/testOnly org.apache.spark.sql.SQLQueryTestSuite*"`
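    
    The modified test lives in `JsonFunctionsSuite`, so it can also be targeted directly (a suggested command, not taken from the PR description):
    
    ```
    ./build/sbt "sql/testOnly *JsonFunctionsSuite"
    ```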
    
    Closes #39284 from itholic/LEGACY_2136.
    
    Authored-by: itholic <haejoon....@databricks.com>
    Signed-off-by: Max Gekk <max.g...@gmail.com>
---
 core/src/main/resources/error/error-classes.json    | 10 +++++-----
 .../spark/sql/catalyst/json/JacksonParser.scala     |  2 +-
 .../spark/sql/errors/QueryExecutionErrors.scala     |  8 ++++----
 .../org/apache/spark/sql/JsonFunctionsSuite.scala   | 21 ++++++++++++++-------
 4 files changed, 24 insertions(+), 17 deletions(-)

diff --git a/core/src/main/resources/error/error-classes.json b/core/src/main/resources/error/error-classes.json
index a7b120ef427..120925f5254 100644
--- a/core/src/main/resources/error/error-classes.json
+++ b/core/src/main/resources/error/error-classes.json
@@ -75,6 +75,11 @@
     ],
     "sqlState" : "42000"
   },
+  "CANNOT_PARSE_JSON_FIELD" : {
+    "message" : [
+      "Cannot parse the field name <fieldName> and the value <fieldValue> of 
the JSON token type <jsonType> to target Spark data type <dataType>"
+    ]
+  },
   "CANNOT_PARSE_PROTOBUF_DESCRIPTOR" : {
     "message" : [
       "Error parsing file <descFilePath> descriptor byte[] into Descriptor 
object"
@@ -4105,11 +4110,6 @@
       "Failed to parse an empty string for data type <dataType>"
     ]
   },
-  "_LEGACY_ERROR_TEMP_2136" : {
-    "message" : [
-      "Failed to parse field name <fieldName>, field value <fieldValue>, 
[<token>] to target spark data type [<dataType>]."
-    ]
-  },
   "_LEGACY_ERROR_TEMP_2137" : {
     "message" : [
       "Root converter returned null"
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/json/JacksonParser.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/json/JacksonParser.scala
index ee21a1e2b76..3fe26e87499 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/json/JacksonParser.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/json/JacksonParser.scala
@@ -430,7 +430,7 @@ class JacksonParser(
     case token =>
      // We cannot parse this token based on the given data type. So, we throw a
      // RuntimeException and this exception will be caught by `parse` method.
-      throw QueryExecutionErrors.failToParseValueForDataTypeError(parser, token, dataType)
+      throw QueryExecutionErrors.cannotParseJSONFieldError(parser, token, dataType)
   }
 
   /**
diff --git 
a/sql/catalyst/src/main/scala/org/apache/spark/sql/errors/QueryExecutionErrors.scala
 
b/sql/catalyst/src/main/scala/org/apache/spark/sql/errors/QueryExecutionErrors.scala
index 3e234cfee2c..44a1972272f 100644
--- 
a/sql/catalyst/src/main/scala/org/apache/spark/sql/errors/QueryExecutionErrors.scala
+++ 
b/sql/catalyst/src/main/scala/org/apache/spark/sql/errors/QueryExecutionErrors.scala
@@ -1444,15 +1444,15 @@ private[sql] object QueryExecutionErrors extends QueryErrorsBase {
         "dataType" -> dataType.catalogString))
   }
 
-  def failToParseValueForDataTypeError(parser: JsonParser, token: JsonToken, dataType: DataType)
+  def cannotParseJSONFieldError(parser: JsonParser, jsonType: JsonToken, dataType: DataType)
   : SparkRuntimeException = {
     new SparkRuntimeException(
-      errorClass = "_LEGACY_ERROR_TEMP_2136",
+      errorClass = "CANNOT_PARSE_JSON_FIELD",
       messageParameters = Map(
         "fieldName" -> parser.getCurrentName.toString(),
         "fieldValue" -> parser.getText.toString(),
-        "token" -> token.toString(),
-        "dataType" -> dataType.toString()))
+        "jsonType" -> jsonType.toString(),
+        "dataType" -> toSQLType(dataType)))
   }
 
   def rootConverterReturnNullError(): SparkRuntimeException = {
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/JsonFunctionsSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/JsonFunctionsSuite.scala
index 399665c0de6..0f282336d58 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/JsonFunctionsSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/JsonFunctionsSuite.scala
@@ -22,8 +22,9 @@ import java.time.{Duration, LocalDateTime, Period}
 import java.util.Locale
 
 import collection.JavaConverters._
+import org.apache.commons.lang3.exception.ExceptionUtils
 
-import org.apache.spark.SparkException
+import org.apache.spark.{SparkException, SparkRuntimeException}
 import org.apache.spark.sql.catalyst.InternalRow
 import org.apache.spark.sql.catalyst.expressions.{Literal, StructsToJson}
 import org.apache.spark.sql.functions._
@@ -774,15 +775,21 @@ class JsonFunctionsSuite extends QueryTest with SharedSparkSession {
         df.select(from_json($"value", schema, Map("mode" -> "PERMISSIVE"))),
         Row(Row(null, 11, badRec)) :: Row(Row(2, 12, null)) :: Nil)
 
-      val errMsg = intercept[SparkException] {
+      val exception = intercept[SparkException] {
        df.select(from_json($"value", schema, Map("mode" -> "FAILFAST"))).collect()
-      }.getMessage
+      }
 
-      assert(errMsg.contains(
+      assert(exception.getMessage.contains(
         "Malformed records are detected in record parsing. Parse Mode: 
FAILFAST."))
-      assert(errMsg.contains(
-        "Failed to parse field name a, field value 1, " +
-          "[VALUE_STRING] to target spark data type [IntegerType]."))
+      checkError(
+        exception = ExceptionUtils.getRootCause(exception).asInstanceOf[SparkRuntimeException],
+        errorClass = "CANNOT_PARSE_JSON_FIELD",
+        parameters = Map(
+          "fieldName" -> "a",
+          "fieldValue" -> "1",
+          "jsonType" -> "VALUE_STRING",
+          "dataType" -> "\"INT\"")
+      )
     }
   }
 

