[jira] [Commented] (SPARK-36673) Incorrect Unions of struct with mismatched field name case
[ https://issues.apache.org/jira/browse/SPARK-36673?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel=17416782#comment-17416782 ] Apache Spark commented on SPARK-36673: -- User 'viirya' has created a pull request for this issue: https://github.com/apache/spark/pull/34032 > Incorrect Unions of struct with mismatched field name case > -- > > Key: SPARK-36673 > URL: https://issues.apache.org/jira/browse/SPARK-36673 > Project: Spark > Issue Type: Bug > Components: SQL >Affects Versions: 3.1.1, 3.2.0 >Reporter: Shardul Mahadik >Assignee: L. C. Hsieh >Priority: Major > Fix For: 3.2.0 > > > If a nested field has different casing on two sides of the union, the > resultant schema of the union will contain both fields in its schema > {code:java} > scala> val df1 = spark.range(2).withColumn("nested", struct(expr("id * 5 AS > INNER"))) > df1: org.apache.spark.sql.DataFrame = [id: bigint, nested: struct<INNER: bigint>] > val df2 = spark.range(2).withColumn("nested", struct(expr("id * 5 AS inner"))) > df2: org.apache.spark.sql.DataFrame = [id: bigint, nested: struct<inner: bigint>] > scala> df1.union(df2).printSchema > root > |-- id: long (nullable = false) > |-- nested: struct (nullable = false) > ||-- INNER: long (nullable = false) > ||-- inner: long (nullable = false) > {code} > This seems like a bug. I would expect that Spark SQL would either just union > by index or if the user has requested {{unionByName}}, then it should match > fields case insensitively if {{spark.sql.caseSensitive}} is {{false}}. 
> However the output data only has one nested column > {code:java} > scala> df1.union(df2).show() > +---+--+ > | id|nested| > +---+--+ > | 0| {0}| > | 1| {5}| > | 0| {0}| > | 1| {5}| > +---+--+ > {code} > Trying to project fields of {{nested}} throws an error: > {code:java} > scala> df1.union(df2).select("nested.*").show() > java.lang.ArrayIndexOutOfBoundsException: 1 > at org.apache.spark.sql.types.StructType.apply(StructType.scala:414) > at > org.apache.spark.sql.catalyst.expressions.GetStructField.dataType(complexTypeExtractors.scala:108) > at > org.apache.spark.sql.catalyst.expressions.Alias.toAttribute(namedExpressions.scala:192) > at > org.apache.spark.sql.catalyst.plans.logical.Project.$anonfun$output$1(basicLogicalOperators.scala:63) > at > scala.collection.TraversableLike.$anonfun$map$1(TraversableLike.scala:238) > at scala.collection.mutable.ResizableArray.foreach(ResizableArray.scala:62) > at scala.collection.mutable.ResizableArray.foreach$(ResizableArray.scala:55) > at scala.collection.mutable.ArrayBuffer.foreach(ArrayBuffer.scala:49) > at scala.collection.TraversableLike.map(TraversableLike.scala:238) > at scala.collection.TraversableLike.map$(TraversableLike.scala:231) > at scala.collection.AbstractTraversable.map(Traversable.scala:108) > at > org.apache.spark.sql.catalyst.plans.logical.Project.output(basicLogicalOperators.scala:63) > at > org.apache.spark.sql.catalyst.plans.logical.Union.$anonfun$output$3(basicLogicalOperators.scala:260) > at > scala.collection.TraversableLike.$anonfun$map$1(TraversableLike.scala:238) > at scala.collection.immutable.List.foreach(List.scala:392) > at scala.collection.TraversableLike.map(TraversableLike.scala:238) > at scala.collection.TraversableLike.map$(TraversableLike.scala:231) > at scala.collection.immutable.List.map(List.scala:298) > at > org.apache.spark.sql.catalyst.plans.logical.Union.output(basicLogicalOperators.scala:260) > at > 
org.apache.spark.sql.catalyst.plans.QueryPlan.outputSet$lzycompute(QueryPlan.scala:49) > at > org.apache.spark.sql.catalyst.plans.QueryPlan.outputSet(QueryPlan.scala:49) > at > org.apache.spark.sql.catalyst.optimizer.ColumnPruning$$anonfun$apply$8.applyOrElse(Optimizer.scala:747) > at > org.apache.spark.sql.catalyst.optimizer.ColumnPruning$$anonfun$apply$8.applyOrElse(Optimizer.scala:695) > at > org.apache.spark.sql.catalyst.trees.TreeNode.$anonfun$transformDown$1(TreeNode.scala:316) > at > org.apache.spark.sql.catalyst.trees.CurrentOrigin$.withOrigin(TreeNode.scala:72) > at > org.apache.spark.sql.catalyst.trees.TreeNode.transformDown(TreeNode.scala:316) > at > org.apache.spark.sql.catalyst.plans.logical.LogicalPlan.org$apache$spark$sql$catalyst$plans$logical$AnalysisHelper$$super$transformDown(LogicalPlan.scala:29) > at > org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper.transformDown(AnalysisHelper.scala:171) > at > org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper.transformDown$(AnalysisHelper.scala:169) > at > org.apache.spark.sql.catalyst.plans.logical.LogicalPlan.transformDown(LogicalPlan.scala:29) > at >
[jira] [Commented] (SPARK-36673) Incorrect Unions of struct with mismatched field name case
[ https://issues.apache.org/jira/browse/SPARK-36673?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel=17416311#comment-17416311 ] Apache Spark commented on SPARK-36673: -- User 'viirya' has created a pull request for this issue: https://github.com/apache/spark/pull/34025 > Incorrect Unions of struct with mismatched field name case > -- > > Key: SPARK-36673 > URL: https://issues.apache.org/jira/browse/SPARK-36673 > Project: Spark > Issue Type: Bug > Components: SQL >Affects Versions: 3.1.1, 3.2.0 >Reporter: Shardul Mahadik >Priority: Major > > If a nested field has different casing on two sides of the union, the > resultant schema of the union will contain both fields in its schema > {code:java} > scala> val df1 = spark.range(2).withColumn("nested", struct(expr("id * 5 AS > INNER"))) > df1: org.apache.spark.sql.DataFrame = [id: bigint, nested: struct<INNER: bigint>] > val df2 = spark.range(2).withColumn("nested", struct(expr("id * 5 AS inner"))) > df2: org.apache.spark.sql.DataFrame = [id: bigint, nested: struct<inner: bigint>] > scala> df1.union(df2).printSchema > root > |-- id: long (nullable = false) > |-- nested: struct (nullable = false) > ||-- INNER: long (nullable = false) > ||-- inner: long (nullable = false) > {code} > This seems like a bug. I would expect that Spark SQL would either just union > by index or if the user has requested {{unionByName}}, then it should match > fields case insensitively if {{spark.sql.caseSensitive}} is {{false}}. 
> However the output data only has one nested column > {code:java} > scala> df1.union(df2).show() > +---+--+ > | id|nested| > +---+--+ > | 0| {0}| > | 1| {5}| > | 0| {0}| > | 1| {5}| > +---+--+ > {code} > Trying to project fields of {{nested}} throws an error: > {code:java} > scala> df1.union(df2).select("nested.*").show() > java.lang.ArrayIndexOutOfBoundsException: 1 > at org.apache.spark.sql.types.StructType.apply(StructType.scala:414) > at > org.apache.spark.sql.catalyst.expressions.GetStructField.dataType(complexTypeExtractors.scala:108) > at > org.apache.spark.sql.catalyst.expressions.Alias.toAttribute(namedExpressions.scala:192) > at > org.apache.spark.sql.catalyst.plans.logical.Project.$anonfun$output$1(basicLogicalOperators.scala:63) > at > scala.collection.TraversableLike.$anonfun$map$1(TraversableLike.scala:238) > at scala.collection.mutable.ResizableArray.foreach(ResizableArray.scala:62) > at scala.collection.mutable.ResizableArray.foreach$(ResizableArray.scala:55) > at scala.collection.mutable.ArrayBuffer.foreach(ArrayBuffer.scala:49) > at scala.collection.TraversableLike.map(TraversableLike.scala:238) > at scala.collection.TraversableLike.map$(TraversableLike.scala:231) > at scala.collection.AbstractTraversable.map(Traversable.scala:108) > at > org.apache.spark.sql.catalyst.plans.logical.Project.output(basicLogicalOperators.scala:63) > at > org.apache.spark.sql.catalyst.plans.logical.Union.$anonfun$output$3(basicLogicalOperators.scala:260) > at > scala.collection.TraversableLike.$anonfun$map$1(TraversableLike.scala:238) > at scala.collection.immutable.List.foreach(List.scala:392) > at scala.collection.TraversableLike.map(TraversableLike.scala:238) > at scala.collection.TraversableLike.map$(TraversableLike.scala:231) > at scala.collection.immutable.List.map(List.scala:298) > at > org.apache.spark.sql.catalyst.plans.logical.Union.output(basicLogicalOperators.scala:260) > at > 
org.apache.spark.sql.catalyst.plans.QueryPlan.outputSet$lzycompute(QueryPlan.scala:49) > at > org.apache.spark.sql.catalyst.plans.QueryPlan.outputSet(QueryPlan.scala:49) > at > org.apache.spark.sql.catalyst.optimizer.ColumnPruning$$anonfun$apply$8.applyOrElse(Optimizer.scala:747) > at > org.apache.spark.sql.catalyst.optimizer.ColumnPruning$$anonfun$apply$8.applyOrElse(Optimizer.scala:695) > at > org.apache.spark.sql.catalyst.trees.TreeNode.$anonfun$transformDown$1(TreeNode.scala:316) > at > org.apache.spark.sql.catalyst.trees.CurrentOrigin$.withOrigin(TreeNode.scala:72) > at > org.apache.spark.sql.catalyst.trees.TreeNode.transformDown(TreeNode.scala:316) > at > org.apache.spark.sql.catalyst.plans.logical.LogicalPlan.org$apache$spark$sql$catalyst$plans$logical$AnalysisHelper$$super$transformDown(LogicalPlan.scala:29) > at > org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper.transformDown(AnalysisHelper.scala:171) > at > org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper.transformDown$(AnalysisHelper.scala:169) > at > org.apache.spark.sql.catalyst.plans.logical.LogicalPlan.transformDown(LogicalPlan.scala:29) > at > org.apache.spark.sql.catalyst.plans.logical.LogicalPlan.transformDown(LogicalPlan.scala:29) > at >
[jira] [Commented] (SPARK-36673) Incorrect Unions of struct with mismatched field name case
[ https://issues.apache.org/jira/browse/SPARK-36673?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel=17415921#comment-17415921 ] L. C. Hsieh commented on SPARK-36673: - The schema after union looks incorrect. By the definition of `union`, there should be only one column in the nested struct. > Incorrect Unions of struct with mismatched field name case > -- > > Key: SPARK-36673 > URL: https://issues.apache.org/jira/browse/SPARK-36673 > Project: Spark > Issue Type: Bug > Components: SQL >Affects Versions: 3.1.1, 3.2.0 >Reporter: Shardul Mahadik >Priority: Major > > If a nested field has different casing on two sides of the union, the > resultant schema of the union will contain both fields in its schema > {code:java} > scala> val df1 = spark.range(2).withColumn("nested", struct(expr("id * 5 AS > INNER"))) > df1: org.apache.spark.sql.DataFrame = [id: bigint, nested: struct<INNER: bigint>] > val df2 = spark.range(2).withColumn("nested", struct(expr("id * 5 AS inner"))) > df2: org.apache.spark.sql.DataFrame = [id: bigint, nested: struct<inner: bigint>] > scala> df1.union(df2).printSchema > root > |-- id: long (nullable = false) > |-- nested: struct (nullable = false) > ||-- INNER: long (nullable = false) > ||-- inner: long (nullable = false) > {code} > This seems like a bug. I would expect that Spark SQL would either just union > by index or if the user has requested {{unionByName}}, then it should match > fields case insensitively if {{spark.sql.caseSensitive}} is {{false}}. 
> However the output data only has one nested column > {code:java} > scala> df1.union(df2).show() > +---+--+ > | id|nested| > +---+--+ > | 0| {0}| > | 1| {5}| > | 0| {0}| > | 1| {5}| > +---+--+ > {code} > Trying to project fields of {{nested}} throws an error: > {code:java} > scala> df1.union(df2).select("nested.*").show() > java.lang.ArrayIndexOutOfBoundsException: 1 > at org.apache.spark.sql.types.StructType.apply(StructType.scala:414) > at > org.apache.spark.sql.catalyst.expressions.GetStructField.dataType(complexTypeExtractors.scala:108) > at > org.apache.spark.sql.catalyst.expressions.Alias.toAttribute(namedExpressions.scala:192) > at > org.apache.spark.sql.catalyst.plans.logical.Project.$anonfun$output$1(basicLogicalOperators.scala:63) > at > scala.collection.TraversableLike.$anonfun$map$1(TraversableLike.scala:238) > at scala.collection.mutable.ResizableArray.foreach(ResizableArray.scala:62) > at scala.collection.mutable.ResizableArray.foreach$(ResizableArray.scala:55) > at scala.collection.mutable.ArrayBuffer.foreach(ArrayBuffer.scala:49) > at scala.collection.TraversableLike.map(TraversableLike.scala:238) > at scala.collection.TraversableLike.map$(TraversableLike.scala:231) > at scala.collection.AbstractTraversable.map(Traversable.scala:108) > at > org.apache.spark.sql.catalyst.plans.logical.Project.output(basicLogicalOperators.scala:63) > at > org.apache.spark.sql.catalyst.plans.logical.Union.$anonfun$output$3(basicLogicalOperators.scala:260) > at > scala.collection.TraversableLike.$anonfun$map$1(TraversableLike.scala:238) > at scala.collection.immutable.List.foreach(List.scala:392) > at scala.collection.TraversableLike.map(TraversableLike.scala:238) > at scala.collection.TraversableLike.map$(TraversableLike.scala:231) > at scala.collection.immutable.List.map(List.scala:298) > at > org.apache.spark.sql.catalyst.plans.logical.Union.output(basicLogicalOperators.scala:260) > at > 
org.apache.spark.sql.catalyst.plans.QueryPlan.outputSet$lzycompute(QueryPlan.scala:49) > at > org.apache.spark.sql.catalyst.plans.QueryPlan.outputSet(QueryPlan.scala:49) > at > org.apache.spark.sql.catalyst.optimizer.ColumnPruning$$anonfun$apply$8.applyOrElse(Optimizer.scala:747) > at > org.apache.spark.sql.catalyst.optimizer.ColumnPruning$$anonfun$apply$8.applyOrElse(Optimizer.scala:695) > at > org.apache.spark.sql.catalyst.trees.TreeNode.$anonfun$transformDown$1(TreeNode.scala:316) > at > org.apache.spark.sql.catalyst.trees.CurrentOrigin$.withOrigin(TreeNode.scala:72) > at > org.apache.spark.sql.catalyst.trees.TreeNode.transformDown(TreeNode.scala:316) > at > org.apache.spark.sql.catalyst.plans.logical.LogicalPlan.org$apache$spark$sql$catalyst$plans$logical$AnalysisHelper$$super$transformDown(LogicalPlan.scala:29) > at > org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper.transformDown(AnalysisHelper.scala:171) > at > org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper.transformDown$(AnalysisHelper.scala:169) > at > org.apache.spark.sql.catalyst.plans.logical.LogicalPlan.transformDown(LogicalPlan.scala:29) > at > org.apache.spark.sql.catalyst.plans.logical.LogicalPlan.transformDown(LogicalPlan.scala:29) > at >
[jira] [Commented] (SPARK-36673) Incorrect Unions of struct with mismatched field name case
[ https://issues.apache.org/jira/browse/SPARK-36673?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel=17415864#comment-17415864 ] Wenchen Fan commented on SPARK-36673: - This seems like a bug. [~viirya] what do you think? > Incorrect Unions of struct with mismatched field name case > -- > > Key: SPARK-36673 > URL: https://issues.apache.org/jira/browse/SPARK-36673 > Project: Spark > Issue Type: Bug > Components: SQL >Affects Versions: 3.1.1, 3.2.0 >Reporter: Shardul Mahadik >Priority: Major > > If a nested field has different casing on two sides of the union, the > resultant schema of the union will contain both fields in its schema > {code:java} > scala> val df1 = spark.range(2).withColumn("nested", struct(expr("id * 5 AS > INNER"))) > df1: org.apache.spark.sql.DataFrame = [id: bigint, nested: struct<INNER: bigint>] > val df2 = spark.range(2).withColumn("nested", struct(expr("id * 5 AS inner"))) > df2: org.apache.spark.sql.DataFrame = [id: bigint, nested: struct<inner: bigint>] > scala> df1.union(df2).printSchema > root > |-- id: long (nullable = false) > |-- nested: struct (nullable = false) > ||-- INNER: long (nullable = false) > ||-- inner: long (nullable = false) > {code} > This seems like a bug. I would expect that Spark SQL would either just union > by index or if the user has requested {{unionByName}}, then it should match > fields case insensitively if {{spark.sql.caseSensitive}} is {{false}}. 
> However the output data only has one nested column > {code:java} > scala> df1.union(df2).show() > +---+--+ > | id|nested| > +---+--+ > | 0| {0}| > | 1| {5}| > | 0| {0}| > | 1| {5}| > +---+--+ > {code} > Trying to project fields of {{nested}} throws an error: > {code:java} > scala> df1.union(df2).select("nested.*").show() > java.lang.ArrayIndexOutOfBoundsException: 1 > at org.apache.spark.sql.types.StructType.apply(StructType.scala:414) > at > org.apache.spark.sql.catalyst.expressions.GetStructField.dataType(complexTypeExtractors.scala:108) > at > org.apache.spark.sql.catalyst.expressions.Alias.toAttribute(namedExpressions.scala:192) > at > org.apache.spark.sql.catalyst.plans.logical.Project.$anonfun$output$1(basicLogicalOperators.scala:63) > at > scala.collection.TraversableLike.$anonfun$map$1(TraversableLike.scala:238) > at scala.collection.mutable.ResizableArray.foreach(ResizableArray.scala:62) > at scala.collection.mutable.ResizableArray.foreach$(ResizableArray.scala:55) > at scala.collection.mutable.ArrayBuffer.foreach(ArrayBuffer.scala:49) > at scala.collection.TraversableLike.map(TraversableLike.scala:238) > at scala.collection.TraversableLike.map$(TraversableLike.scala:231) > at scala.collection.AbstractTraversable.map(Traversable.scala:108) > at > org.apache.spark.sql.catalyst.plans.logical.Project.output(basicLogicalOperators.scala:63) > at > org.apache.spark.sql.catalyst.plans.logical.Union.$anonfun$output$3(basicLogicalOperators.scala:260) > at > scala.collection.TraversableLike.$anonfun$map$1(TraversableLike.scala:238) > at scala.collection.immutable.List.foreach(List.scala:392) > at scala.collection.TraversableLike.map(TraversableLike.scala:238) > at scala.collection.TraversableLike.map$(TraversableLike.scala:231) > at scala.collection.immutable.List.map(List.scala:298) > at > org.apache.spark.sql.catalyst.plans.logical.Union.output(basicLogicalOperators.scala:260) > at > 
org.apache.spark.sql.catalyst.plans.QueryPlan.outputSet$lzycompute(QueryPlan.scala:49) > at > org.apache.spark.sql.catalyst.plans.QueryPlan.outputSet(QueryPlan.scala:49) > at > org.apache.spark.sql.catalyst.optimizer.ColumnPruning$$anonfun$apply$8.applyOrElse(Optimizer.scala:747) > at > org.apache.spark.sql.catalyst.optimizer.ColumnPruning$$anonfun$apply$8.applyOrElse(Optimizer.scala:695) > at > org.apache.spark.sql.catalyst.trees.TreeNode.$anonfun$transformDown$1(TreeNode.scala:316) > at > org.apache.spark.sql.catalyst.trees.CurrentOrigin$.withOrigin(TreeNode.scala:72) > at > org.apache.spark.sql.catalyst.trees.TreeNode.transformDown(TreeNode.scala:316) > at > org.apache.spark.sql.catalyst.plans.logical.LogicalPlan.org$apache$spark$sql$catalyst$plans$logical$AnalysisHelper$$super$transformDown(LogicalPlan.scala:29) > at > org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper.transformDown(AnalysisHelper.scala:171) > at > org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper.transformDown$(AnalysisHelper.scala:169) > at > org.apache.spark.sql.catalyst.plans.logical.LogicalPlan.transformDown(LogicalPlan.scala:29) > at > org.apache.spark.sql.catalyst.plans.logical.LogicalPlan.transformDown(LogicalPlan.scala:29) > at >
[jira] [Commented] (SPARK-36673) Incorrect Unions of struct with mismatched field name case
[ https://issues.apache.org/jira/browse/SPARK-36673?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel=17412841#comment-17412841 ] Erik Krogen commented on SPARK-36673: - From the Scaladoc for {{union}}: {code} * Also as standard in SQL, this function resolves columns by position (not by name): {code} So it seems to me we should be ignoring the names altogether and just doing positional matching. {quote} AFAIK, in SQL the names in the struct are case sensitive, while the name of the normal fields are not. {quote} I was under the impression that nested fields are also case-insensitive. Do we have any documentation around this? In any case, when we fix this, we need to be careful about {{unionByName}}, including the new-ish {{allowMissingColumns}} option... > Incorrect Unions of struct with mismatched field name case > -- > > Key: SPARK-36673 > URL: https://issues.apache.org/jira/browse/SPARK-36673 > Project: Spark > Issue Type: Bug > Components: SQL >Affects Versions: 3.1.1, 3.2.0 >Reporter: Shardul Mahadik >Priority: Major > > If a nested field has different casing on two sides of the union, the > resultant schema of the union will contain both fields in its schema > {code:java} > scala> val df1 = spark.range(2).withColumn("nested", struct(expr("id * 5 AS > INNER"))) > df1: org.apache.spark.sql.DataFrame = [id: bigint, nested: struct<INNER: bigint>] > val df2 = spark.range(2).withColumn("nested", struct(expr("id * 5 AS inner"))) > df2: org.apache.spark.sql.DataFrame = [id: bigint, nested: struct<inner: bigint>] > scala> df1.union(df2).printSchema > root > |-- id: long (nullable = false) > |-- nested: struct (nullable = false) > ||-- INNER: long (nullable = false) > ||-- inner: long (nullable = false) > {code} > This seems like a bug. I would expect that Spark SQL would either just union > by index or if the user has requested {{unionByName}}, then it should match > fields case insensitively if {{spark.sql.caseSensitive}} is {{false}}. 
> However the output data only has one nested column > {code:java} > scala> df1.union(df2).show() > +---+--+ > | id|nested| > +---+--+ > | 0| {0}| > | 1| {5}| > | 0| {0}| > | 1| {5}| > +---+--+ > {code} > Trying to project fields of {{nested}} throws an error: > {code:java} > scala> df1.union(df2).select("nested.*").show() > java.lang.ArrayIndexOutOfBoundsException: 1 > at org.apache.spark.sql.types.StructType.apply(StructType.scala:414) > at > org.apache.spark.sql.catalyst.expressions.GetStructField.dataType(complexTypeExtractors.scala:108) > at > org.apache.spark.sql.catalyst.expressions.Alias.toAttribute(namedExpressions.scala:192) > at > org.apache.spark.sql.catalyst.plans.logical.Project.$anonfun$output$1(basicLogicalOperators.scala:63) > at > scala.collection.TraversableLike.$anonfun$map$1(TraversableLike.scala:238) > at scala.collection.mutable.ResizableArray.foreach(ResizableArray.scala:62) > at scala.collection.mutable.ResizableArray.foreach$(ResizableArray.scala:55) > at scala.collection.mutable.ArrayBuffer.foreach(ArrayBuffer.scala:49) > at scala.collection.TraversableLike.map(TraversableLike.scala:238) > at scala.collection.TraversableLike.map$(TraversableLike.scala:231) > at scala.collection.AbstractTraversable.map(Traversable.scala:108) > at > org.apache.spark.sql.catalyst.plans.logical.Project.output(basicLogicalOperators.scala:63) > at > org.apache.spark.sql.catalyst.plans.logical.Union.$anonfun$output$3(basicLogicalOperators.scala:260) > at > scala.collection.TraversableLike.$anonfun$map$1(TraversableLike.scala:238) > at scala.collection.immutable.List.foreach(List.scala:392) > at scala.collection.TraversableLike.map(TraversableLike.scala:238) > at scala.collection.TraversableLike.map$(TraversableLike.scala:231) > at scala.collection.immutable.List.map(List.scala:298) > at > org.apache.spark.sql.catalyst.plans.logical.Union.output(basicLogicalOperators.scala:260) > at > 
org.apache.spark.sql.catalyst.plans.QueryPlan.outputSet$lzycompute(QueryPlan.scala:49) > at > org.apache.spark.sql.catalyst.plans.QueryPlan.outputSet(QueryPlan.scala:49) > at > org.apache.spark.sql.catalyst.optimizer.ColumnPruning$$anonfun$apply$8.applyOrElse(Optimizer.scala:747) > at > org.apache.spark.sql.catalyst.optimizer.ColumnPruning$$anonfun$apply$8.applyOrElse(Optimizer.scala:695) > at > org.apache.spark.sql.catalyst.trees.TreeNode.$anonfun$transformDown$1(TreeNode.scala:316) > at > org.apache.spark.sql.catalyst.trees.CurrentOrigin$.withOrigin(TreeNode.scala:72) > at > org.apache.spark.sql.catalyst.trees.TreeNode.transformDown(TreeNode.scala:316) > at >
[jira] [Commented] (SPARK-36673) Incorrect Unions of struct with mismatched field name case
[ https://issues.apache.org/jira/browse/SPARK-36673?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel=17410488#comment-17410488 ] Marco Gaido commented on SPARK-36673: - AFAIK, in SQL the names in the struct are case sensitive, while the name of the normal fields are not. I am not sure about the right behavior here, but maybe I would expect an error at analysis time. Definitely, the current behavior is not correct. > Incorrect Unions of struct with mismatched field name case > -- > > Key: SPARK-36673 > URL: https://issues.apache.org/jira/browse/SPARK-36673 > Project: Spark > Issue Type: Bug > Components: SQL >Affects Versions: 3.1.1, 3.2.0 >Reporter: Shardul Mahadik >Priority: Major > > If a nested field has different casing on two sides of the union, the > resultant schema of the union will contain both fields in its schema > {code:java} > scala> val df1 = spark.range(2).withColumn("nested", struct(expr("id * 5 AS > INNER"))) > df1: org.apache.spark.sql.DataFrame = [id: bigint, nested: struct<INNER: bigint>] > val df2 = spark.range(2).withColumn("nested", struct(expr("id * 5 AS inner"))) > df2: org.apache.spark.sql.DataFrame = [id: bigint, nested: struct<inner: bigint>] > scala> df1.union(df2).printSchema > root > |-- id: long (nullable = false) > |-- nested: struct (nullable = false) > ||-- INNER: long (nullable = false) > ||-- inner: long (nullable = false) > {code} > This seems like a bug. I would expect that Spark SQL would either just union > by index or if the user has requested {{unionByName}}, then it should match > fields case insensitively if {{spark.sql.caseSensitive}} is {{false}}. 
> However the output data only has one nested column > {code:java} > scala> df1.union(df2).show() > +---+--+ > | id|nested| > +---+--+ > | 0| {0}| > | 1| {5}| > | 0| {0}| > | 1| {5}| > +---+--+ > {code} > Trying to project fields of {{nested}} throws an error: > {code:java} > scala> df1.union(df2).select("nested.*").show() > java.lang.ArrayIndexOutOfBoundsException: 1 > at org.apache.spark.sql.types.StructType.apply(StructType.scala:414) > at > org.apache.spark.sql.catalyst.expressions.GetStructField.dataType(complexTypeExtractors.scala:108) > at > org.apache.spark.sql.catalyst.expressions.Alias.toAttribute(namedExpressions.scala:192) > at > org.apache.spark.sql.catalyst.plans.logical.Project.$anonfun$output$1(basicLogicalOperators.scala:63) > at > scala.collection.TraversableLike.$anonfun$map$1(TraversableLike.scala:238) > at scala.collection.mutable.ResizableArray.foreach(ResizableArray.scala:62) > at scala.collection.mutable.ResizableArray.foreach$(ResizableArray.scala:55) > at scala.collection.mutable.ArrayBuffer.foreach(ArrayBuffer.scala:49) > at scala.collection.TraversableLike.map(TraversableLike.scala:238) > at scala.collection.TraversableLike.map$(TraversableLike.scala:231) > at scala.collection.AbstractTraversable.map(Traversable.scala:108) > at > org.apache.spark.sql.catalyst.plans.logical.Project.output(basicLogicalOperators.scala:63) > at > org.apache.spark.sql.catalyst.plans.logical.Union.$anonfun$output$3(basicLogicalOperators.scala:260) > at > scala.collection.TraversableLike.$anonfun$map$1(TraversableLike.scala:238) > at scala.collection.immutable.List.foreach(List.scala:392) > at scala.collection.TraversableLike.map(TraversableLike.scala:238) > at scala.collection.TraversableLike.map$(TraversableLike.scala:231) > at scala.collection.immutable.List.map(List.scala:298) > at > org.apache.spark.sql.catalyst.plans.logical.Union.output(basicLogicalOperators.scala:260) > at > 
org.apache.spark.sql.catalyst.plans.QueryPlan.outputSet$lzycompute(QueryPlan.scala:49) > at > org.apache.spark.sql.catalyst.plans.QueryPlan.outputSet(QueryPlan.scala:49) > at > org.apache.spark.sql.catalyst.optimizer.ColumnPruning$$anonfun$apply$8.applyOrElse(Optimizer.scala:747) > at > org.apache.spark.sql.catalyst.optimizer.ColumnPruning$$anonfun$apply$8.applyOrElse(Optimizer.scala:695) > at > org.apache.spark.sql.catalyst.trees.TreeNode.$anonfun$transformDown$1(TreeNode.scala:316) > at > org.apache.spark.sql.catalyst.trees.CurrentOrigin$.withOrigin(TreeNode.scala:72) > at > org.apache.spark.sql.catalyst.trees.TreeNode.transformDown(TreeNode.scala:316) > at > org.apache.spark.sql.catalyst.plans.logical.LogicalPlan.org$apache$spark$sql$catalyst$plans$logical$AnalysisHelper$$super$transformDown(LogicalPlan.scala:29) > at > org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper.transformDown(AnalysisHelper.scala:171) > at > org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper.transformDown$(AnalysisHelper.scala:169) > at >
[jira] [Commented] (SPARK-36673) Incorrect Unions of struct with mismatched field name case
[ https://issues.apache.org/jira/browse/SPARK-36673?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel=17410477#comment-17410477 ] Shardul Mahadik commented on SPARK-36673: - [~mgaido] [~cloud_fan] Since you guys were involved in the original PR for SPARK-26812, do you have thoughts on what the right behavior is here? > Incorrect Unions of struct with mismatched field name case > -- > > Key: SPARK-36673 > URL: https://issues.apache.org/jira/browse/SPARK-36673 > Project: Spark > Issue Type: Bug > Components: SQL >Affects Versions: 3.1.1, 3.2.0 >Reporter: Shardul Mahadik >Priority: Major > > If a nested field has different casing on two sides of the union, the > resultant schema of the union will contain both fields in its schema > {code:java} > scala> val df1 = spark.range(2).withColumn("nested", struct(expr("id * 5 AS > INNER"))) > df1: org.apache.spark.sql.DataFrame = [id: bigint, nested: struct<INNER: bigint>] > val df2 = spark.range(2).withColumn("nested", struct(expr("id * 5 AS inner"))) > df2: org.apache.spark.sql.DataFrame = [id: bigint, nested: struct<inner: bigint>] > scala> df1.union(df2).printSchema > root > |-- id: long (nullable = false) > |-- nested: struct (nullable = false) > ||-- INNER: long (nullable = false) > ||-- inner: long (nullable = false) > {code} > This seems like a bug. I would expect that Spark SQL would either just union > by index or if the user has requested {{unionByName}}, then it should match > fields case insensitively if {{spark.sql.caseSensitive}} is {{false}}. 
> However the output data only has one nested column > {code:java} > scala> df1.union(df2).show() > +---+--+ > | id|nested| > +---+--+ > | 0| {0}| > | 1| {5}| > | 0| {0}| > | 1| {5}| > +---+--+ > {code} > Trying to project fields of {{nested}} throws an error: > {code:java} > scala> df1.union(df2).select("nested.*").show() > java.lang.ArrayIndexOutOfBoundsException: 1 > at org.apache.spark.sql.types.StructType.apply(StructType.scala:414) > at > org.apache.spark.sql.catalyst.expressions.GetStructField.dataType(complexTypeExtractors.scala:108) > at > org.apache.spark.sql.catalyst.expressions.Alias.toAttribute(namedExpressions.scala:192) > at > org.apache.spark.sql.catalyst.plans.logical.Project.$anonfun$output$1(basicLogicalOperators.scala:63) > at > scala.collection.TraversableLike.$anonfun$map$1(TraversableLike.scala:238) > at scala.collection.mutable.ResizableArray.foreach(ResizableArray.scala:62) > at scala.collection.mutable.ResizableArray.foreach$(ResizableArray.scala:55) > at scala.collection.mutable.ArrayBuffer.foreach(ArrayBuffer.scala:49) > at scala.collection.TraversableLike.map(TraversableLike.scala:238) > at scala.collection.TraversableLike.map$(TraversableLike.scala:231) > at scala.collection.AbstractTraversable.map(Traversable.scala:108) > at > org.apache.spark.sql.catalyst.plans.logical.Project.output(basicLogicalOperators.scala:63) > at > org.apache.spark.sql.catalyst.plans.logical.Union.$anonfun$output$3(basicLogicalOperators.scala:260) > at > scala.collection.TraversableLike.$anonfun$map$1(TraversableLike.scala:238) > at scala.collection.immutable.List.foreach(List.scala:392) > at scala.collection.TraversableLike.map(TraversableLike.scala:238) > at scala.collection.TraversableLike.map$(TraversableLike.scala:231) > at scala.collection.immutable.List.map(List.scala:298) > at > org.apache.spark.sql.catalyst.plans.logical.Union.output(basicLogicalOperators.scala:260) > at > 
org.apache.spark.sql.catalyst.plans.QueryPlan.outputSet$lzycompute(QueryPlan.scala:49) > at > org.apache.spark.sql.catalyst.plans.QueryPlan.outputSet(QueryPlan.scala:49) > at > org.apache.spark.sql.catalyst.optimizer.ColumnPruning$$anonfun$apply$8.applyOrElse(Optimizer.scala:747) > at > org.apache.spark.sql.catalyst.optimizer.ColumnPruning$$anonfun$apply$8.applyOrElse(Optimizer.scala:695) > at > org.apache.spark.sql.catalyst.trees.TreeNode.$anonfun$transformDown$1(TreeNode.scala:316) > at > org.apache.spark.sql.catalyst.trees.CurrentOrigin$.withOrigin(TreeNode.scala:72) > at > org.apache.spark.sql.catalyst.trees.TreeNode.transformDown(TreeNode.scala:316) > at > org.apache.spark.sql.catalyst.plans.logical.LogicalPlan.org$apache$spark$sql$catalyst$plans$logical$AnalysisHelper$$super$transformDown(LogicalPlan.scala:29) > at > org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper.transformDown(AnalysisHelper.scala:171) > at > org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper.transformDown$(AnalysisHelper.scala:169) > at > org.apache.spark.sql.catalyst.plans.logical.LogicalPlan.transformDown(LogicalPlan.scala:29) > at >