[ https://issues.apache.org/jira/browse/SPARK-53635?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel ]

Dongjoon Hyun resolved SPARK-53635.
-----------------------------------
    Fix Version/s: 4.1.0
       Resolution: Fixed

Issue resolved by pull request 53027
[https://github.com/apache/spark/pull/53027]

> UDF with array of structs in Spark 4
> ------------------------------------
>
>                 Key: SPARK-53635
>                 URL: https://issues.apache.org/jira/browse/SPARK-53635
>             Project: Spark
>          Issue Type: Bug
>          Components: SQL
>    Affects Versions: 4.0.0, 4.0.1
>            Reporter: Jiri Humpolicek
>            Priority: Major
>              Labels: pull-request-available
>             Fix For: 4.1.0
>
>
> Hi, what am I doing wrong? The following works in Spark 3.x but fails in Spark 4.x:
> {code:scala}
> case class N(navs: Int)
> case class I(first: String, second: Seq[N])
> val h = Seq(I("a", Seq(N(10), N(15)))).toDF
> h.printSchema
> // root
> //  |-- first: string (nullable = true)
> //  |-- second: array (nullable = true)
> //  |    |-- element: struct (containsNull = true)
> //  |    |    |-- navs: integer (nullable = false)
> import org.apache.spark.sql.Row
> import org.apache.spark.sql.functions.udf
> // Sum the `navs` field over the array elements of each row.
> val reduceItems = (items: Seq[Row]) => {
>   items.map(_.getAs[Int]("navs")).reduce(_ + _)
> }
> val reduceItemsUdf = udf(reduceItems)
> h.select(reduceItemsUdf($"second").as("r")).show()
> {code}
> In Spark 4.x it throws:
> {code:java}
> scala.MatchError: org.apache.spark.sql.catalyst.encoders.AgnosticEncoders$UnboundRowEncoder$@4aaf547c (of class org.apache.spark.sql.catalyst.encoders.AgnosticEncoders$UnboundRowEncoder$)
>   at org.apache.spark.sql.catalyst.SerializerBuildHelper$.createSerializer(SerializerBuildHelper.scala:308)
>   at org.apache.spark.sql.catalyst.SerializerBuildHelper$.$anonfun$validateAndSerializeElement$1(SerializerBuildHelper.scala:500)
>   at org.apache.spark.sql.catalyst.expressions.objects.MapObjects$.apply(objects.scala:890)
>   at org.apache.spark.sql.catalyst.SerializerBuildHelper$.createSerializerForMapObjects(SerializerBuildHelper.scala:242)
>   at org.apache.spark.sql.catalyst.SerializerBuildHelper$.serializerForArray(SerializerBuildHelper.scala:449)
>   at org.apache.spark.sql.catalyst.SerializerBuildHelper$.createSerializer(SerializerBuildHelper.scala:356)
>   at org.apache.spark.sql.catalyst.SerializerBuildHelper$.createSerializer(SerializerBuildHelper.scala:300)
>   at org.apache.spark.sql.catalyst.encoders.ExpressionEncoder$.apply(ExpressionEncoder.scala:59)
>   at org.apache.spark.sql.catalyst.encoders.package$.encoderFor(package.scala:34)
>   at org.apache.spark.sql.classic.UserDefinedFunctionUtils$$anonfun$$nestedInanonfun$toScalaUDF$1$1.applyOrElse(UserDefinedFunctionUtils.scala:37)
>   at org.apache.spark.sql.classic.UserDefinedFunctionUtils$$anonfun$$nestedInanonfun$toScalaUDF$1$1.applyOrElse(UserDefinedFunctionUtils.scala:35)
>   at scala.PartialFunction$Lifted.apply(PartialFunction.scala:338)
>   at scala.PartialFunction$Lifted.apply(PartialFunction.scala:334)
>   at scala.Option.collect(Option.scala:462)
>   at org.apache.spark.sql.classic.UserDefinedFunctionUtils$.$anonfun$toScalaUDF$1(UserDefinedFunctionUtils.scala:35)
>   at scala.collection.immutable.ArraySeq.map(ArraySeq.scala:75)
>   at scala.collection.immutable.ArraySeq.map(ArraySeq.scala:35)
>   at org.apache.spark.sql.classic.UserDefinedFunctionUtils$.toScalaUDF(UserDefinedFunctionUtils.scala:35)
>   at org.apache.spark.sql.classic.ColumnNodeToExpressionConverter.$anonfun$apply$2(columnNodeSupport.scala:181)
>   at org.apache.spark.sql.catalyst.trees.CurrentOrigin$.withOrigin(origin.scala:86)
>   at org.apache.spark.sql.classic.ColumnNodeToExpressionConverter.$anonfun$apply$1(columnNodeSupport.scala:48)
>   at org.apache.spark.sql.internal.SQLConf$.withExistingConf(SQLConf.scala:161)
>   at org.apache.spark.sql.classic.ColumnNodeToExpressionConverter.apply(columnNodeSupport.scala:47)
>   at org.apache.spark.sql.classic.ColumnNodeToExpressionConverter.apply$(columnNodeSupport.scala:46)
>   at org.apache.spark.sql.classic.SparkSession$$anon$3.apply(SparkSession.scala:713)
>   at org.apache.spark.sql.classic.ColumnNodeToExpressionConverter.$anonfun$apply$2(columnNodeSupport.scala:91)
>   at org.apache.spark.sql.catalyst.trees.CurrentOrigin$.withOrigin(origin.scala:86)
>   at org.apache.spark.sql.classic.ColumnNodeToExpressionConverter.$anonfun$apply$1(columnNodeSupport.scala:48)
>   at org.apache.spark.sql.internal.SQLConf$.withExistingConf(SQLConf.scala:161)
>   at org.apache.spark.sql.classic.ColumnNodeToExpressionConverter.apply(columnNodeSupport.scala:47)
>   at org.apache.spark.sql.classic.ColumnNodeToExpressionConverter.apply$(columnNodeSupport.scala:46)
>   at org.apache.spark.sql.classic.SparkSession$$anon$3.apply(SparkSession.scala:713)
>   at org.apache.spark.sql.classic.RichColumn.expr(conversions.scala:106)
>   at org.apache.spark.sql.classic.RichColumn.named(conversions.scala:110)
>   at org.apache.spark.sql.classic.Dataset.$anonfun$select$3(Dataset.scala:894)
>   at scala.collection.immutable.ArraySeq.map(ArraySeq.scala:75)
>   at scala.collection.immutable.ArraySeq.map(ArraySeq.scala:35)
>   at org.apache.spark.sql.classic.Dataset.select(Dataset.scala:894)
>   at org.apache.spark.sql.classic.Dataset.select(Dataset.scala:232)
> {code}
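A possible workaround until 4.1.0, as a minimal sketch (not from the original report, and not verified against 4.0.x): since the array elements here have a known case-class shape, typing the UDF parameter as Seq[N] instead of Seq[Row] lets Spark derive a product encoder for the input, avoiding the UnboundRowEncoder path that the MatchError above falls over.

{code:scala}
// Hypothetical sketch reusing the reporter's case classes N and I.
import org.apache.spark.sql.functions.udf

// Typed UDF: Spark derives an encoder for Seq[N] from the case class,
// so no Row-based (UnboundRowEncoder) serialization is involved.
val reduceItemsTyped = udf((items: Seq[N]) => items.map(_.navs).sum)

h.select(reduceItemsTyped($"second").as("r")).show()
// expected: r = 25 (10 + 15)
{code}

Where a UDF is not required at all, the built-in higher-order function {{aggregate}} sidesteps the encoder entirely, e.g. {{h.select(aggregate($"second.navs", lit(0), _ + _).as("r"))}} (with {{import org.apache.spark.sql.functions._}}).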



