This is an automated email from the ASF dual-hosted git repository.

dongjoon pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/spark.git


The following commit(s) were added to refs/heads/master by this push:
     new c17b0922788f [SPARK-45967][BUILD] Upgrade jackson to 2.16.0
c17b0922788f is described below

commit c17b0922788fa182e792f8858df149a33ff9343f
Author: panbingkun <pbk1...@gmail.com>
AuthorDate: Fri Nov 17 21:39:33 2023 -0800

    [SPARK-45967][BUILD] Upgrade jackson to 2.16.0
    
    ### What changes were proposed in this pull request?
    The pr aims to upgrade FasterXML jackson from 2.15.2 to 2.16.0.
    
    ### Why are the changes needed?
    New version that fixes some bugs; release notes as follows:
    - 2.16.0 https://github.com/FasterXML/jackson/wiki/Jackson-Release-2.16, 
eg:
    [Databind](https://github.com/FasterXML/jackson-databind) 
[#1770](https://github.com/FasterXML/jackson-databind/issues/1770): Incorrect 
deserialization for BigDecimal numbers
    - 2.15.3 https://github.com/FasterXML/jackson/wiki/Jackson-Release-2.15.3, 
eg:
    [Databind](https://github.com/FasterXML/jackson-databind) 
[#3968](https://github.com/FasterXML/jackson-databind/issues/3968): Records 
with additional constructors failed to deserialize
    
    The last upgrade occurred 6 months ago, 
https://github.com/apache/spark/pull/41414
    
    ### Does this PR introduce _any_ user-facing change?
    No.
    
    ### How was this patch tested?
    Pass GA.
    
    ### Was this patch authored or co-authored using generative AI tooling?
    No.
    
    Closes #43859 from panbingkun/SPARK-45967.
    
    Authored-by: panbingkun <pbk1...@gmail.com>
    Signed-off-by: Dongjoon Hyun <dh...@apple.com>
---
 dev/deps/spark-deps-hadoop-3-hive-2.3                    | 16 ++++++++--------
 pom.xml                                                  |  4 ++--
 .../scala/org/apache/spark/sql/JsonFunctionsSuite.scala  |  6 ++++--
 3 files changed, 14 insertions(+), 12 deletions(-)

diff --git a/dev/deps/spark-deps-hadoop-3-hive-2.3 
b/dev/deps/spark-deps-hadoop-3-hive-2.3
index cf469f12bcf9..afde3307c622 100644
--- a/dev/deps/spark-deps-hadoop-3-hive-2.3
+++ b/dev/deps/spark-deps-hadoop-3-hive-2.3
@@ -98,15 +98,15 @@ httpcore/4.4.16//httpcore-4.4.16.jar
 ini4j/0.5.4//ini4j-0.5.4.jar
 istack-commons-runtime/3.0.8//istack-commons-runtime-3.0.8.jar
 ivy/2.5.1//ivy-2.5.1.jar
-jackson-annotations/2.15.2//jackson-annotations-2.15.2.jar
+jackson-annotations/2.16.0//jackson-annotations-2.16.0.jar
 jackson-core-asl/1.9.13//jackson-core-asl-1.9.13.jar
-jackson-core/2.15.2//jackson-core-2.15.2.jar
-jackson-databind/2.15.2//jackson-databind-2.15.2.jar
-jackson-dataformat-cbor/2.15.2//jackson-dataformat-cbor-2.15.2.jar
-jackson-dataformat-yaml/2.15.2//jackson-dataformat-yaml-2.15.2.jar
-jackson-datatype-jsr310/2.15.2//jackson-datatype-jsr310-2.15.2.jar
+jackson-core/2.16.0//jackson-core-2.16.0.jar
+jackson-databind/2.16.0//jackson-databind-2.16.0.jar
+jackson-dataformat-cbor/2.16.0//jackson-dataformat-cbor-2.16.0.jar
+jackson-dataformat-yaml/2.16.0//jackson-dataformat-yaml-2.16.0.jar
+jackson-datatype-jsr310/2.16.0//jackson-datatype-jsr310-2.16.0.jar
 jackson-mapper-asl/1.9.13//jackson-mapper-asl-1.9.13.jar
-jackson-module-scala_2.13/2.15.2//jackson-module-scala_2.13-2.15.2.jar
+jackson-module-scala_2.13/2.16.0//jackson-module-scala_2.13-2.16.0.jar
 jakarta.annotation-api/1.3.5//jakarta.annotation-api-1.3.5.jar
 jakarta.inject/2.6.1//jakarta.inject-2.6.1.jar
 jakarta.servlet-api/4.0.3//jakarta.servlet-api-4.0.3.jar
@@ -244,7 +244,7 @@ scala-reflect/2.13.12//scala-reflect-2.13.12.jar
 scala-xml_2.13/2.2.0//scala-xml_2.13-2.2.0.jar
 slf4j-api/2.0.9//slf4j-api-2.0.9.jar
 snakeyaml-engine/2.7//snakeyaml-engine-2.7.jar
-snakeyaml/2.0//snakeyaml-2.0.jar
+snakeyaml/2.2//snakeyaml-2.2.jar
 snappy-java/1.1.10.5//snappy-java-1.1.10.5.jar
 spire-macros_2.13/0.18.0//spire-macros_2.13-0.18.0.jar
 spire-platform_2.13/0.18.0//spire-platform_2.13-0.18.0.jar
diff --git a/pom.xml b/pom.xml
index f8363a66abdb..7fea143bb4d2 100644
--- a/pom.xml
+++ b/pom.xml
@@ -185,8 +185,8 @@
     <scalafmt.validateOnly>true</scalafmt.validateOnly>
     <scalafmt.changedOnly>true</scalafmt.changedOnly>
     <codehaus.jackson.version>1.9.13</codehaus.jackson.version>
-    <fasterxml.jackson.version>2.15.2</fasterxml.jackson.version>
-    
<fasterxml.jackson.databind.version>2.15.2</fasterxml.jackson.databind.version>
+    <fasterxml.jackson.version>2.16.0</fasterxml.jackson.version>
+    
<fasterxml.jackson.databind.version>2.16.0</fasterxml.jackson.databind.version>
     <ws.xmlschema.version>2.3.0</ws.xmlschema.version>
     <org.glassfish.jaxb.txw2.version>3.0.2</org.glassfish.jaxb.txw2.version>
     <snappy.version>1.1.10.5</snappy.version>
diff --git 
a/sql/core/src/test/scala/org/apache/spark/sql/JsonFunctionsSuite.scala 
b/sql/core/src/test/scala/org/apache/spark/sql/JsonFunctionsSuite.scala
index 933f362db663..87593afb332d 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/JsonFunctionsSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/JsonFunctionsSuite.scala
@@ -1175,7 +1175,8 @@ class JsonFunctionsSuite extends QueryTest with 
SharedSparkSession {
     val invalidDataType = "MAP<INT, cow>"
     val invalidDataTypeReason = "Unrecognized token 'MAP': " +
       "was expecting (JSON String, Number, Array, Object or token 'null', 
'true' or 'false')\n " +
-      "at [Source: (String)\"MAP<INT, cow>\"; line: 1, column: 4]"
+      "at [Source: REDACTED (`StreamReadFeature.INCLUDE_SOURCE_IN_LOCATION` 
disabled); " +
+      "line: 1, column: 4]"
     checkError(
       exception = intercept[AnalysisException] {
         df.select(from_json($"json", invalidDataType, Map.empty[String, 
String])).collect()
@@ -1190,7 +1191,8 @@ class JsonFunctionsSuite extends QueryTest with 
SharedSparkSession {
     val invalidTableSchema = "x INT, a cow"
     val invalidTableSchemaReason = "Unrecognized token 'x': " +
       "was expecting (JSON String, Number, Array, Object or token 'null', 
'true' or 'false')\n" +
-      " at [Source: (String)\"x INT, a cow\"; line: 1, column: 2]"
+      " at [Source: REDACTED (`StreamReadFeature.INCLUDE_SOURCE_IN_LOCATION` 
disabled); " +
+      "line: 1, column: 2]"
     checkError(
       exception = intercept[AnalysisException] {
         df.select(from_json($"json", invalidTableSchema, Map.empty[String, 
String])).collect()


---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscr...@spark.apache.org
For additional commands, e-mail: commits-h...@spark.apache.org

Reply via email to