[ https://issues.apache.org/jira/browse/SPARK-15192?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel ]
Cheng Lian updated SPARK-15192:
-------------------------------
    Description: 
When we create a Dataset from an RDD of rows with a specific schema, and the nullability of a value does not match the nullability defined in the schema, we throw an exception that is hard to understand. It would be better to verify the nullability in a more explicit way; a rough sketch of what such a check could look like follows the stack trace below.

{code}
import org.apache.spark.sql.types._
import org.apache.spark.sql.Row

val schema = new StructType().add("a", StringType, false).add("b", StringType, false)
val rdd = sc.parallelize(Row(null, "123") :: Row("234", null) :: Nil)
spark.createDataFrame(rdd, schema).show
{code}

{noformat}
java.lang.RuntimeException: Error while decoding: java.lang.NullPointerException
createexternalrow(if (isnull(input[0, string])) null else input[0, string].toString, if (isnull(input[1, string])) null else input[1, string].toString, StructField(a,StringType,false), StructField(b,StringType,false))
:- if (isnull(input[0, string])) null else input[0, string].toString
:  :- isnull(input[0, string])
:  :  +- input[0, string]
:  :- null
:  +- input[0, string].toString
:     +- input[0, string]
+- if (isnull(input[1, string])) null else input[1, string].toString
   :- isnull(input[1, string])
   :  +- input[1, string]
   :- null
   +- input[1, string].toString
      +- input[1, string]
  at org.apache.spark.sql.catalyst.encoders.ExpressionEncoder.fromRow(ExpressionEncoder.scala:244)
  at org.apache.spark.sql.Dataset$$anonfun$org$apache$spark$sql$Dataset$$execute$1$1$$anonfun$apply$13.apply(Dataset.scala:2119)
  at org.apache.spark.sql.Dataset$$anonfun$org$apache$spark$sql$Dataset$$execute$1$1$$anonfun$apply$13.apply(Dataset.scala:2119)
  at scala.collection.TraversableLike$$anonfun$map$1.apply(TraversableLike.scala:234)
  at scala.collection.TraversableLike$$anonfun$map$1.apply(TraversableLike.scala:234)
  at scala.collection.IndexedSeqOptimized$class.foreach(IndexedSeqOptimized.scala:33)
  at scala.collection.mutable.ArrayOps$ofRef.foreach(ArrayOps.scala:186)
  at scala.collection.TraversableLike$class.map(TraversableLike.scala:234)
  at scala.collection.mutable.ArrayOps$ofRef.map(ArrayOps.scala:186)
  at org.apache.spark.sql.Dataset$$anonfun$org$apache$spark$sql$Dataset$$execute$1$1.apply(Dataset.scala:2119)
  at org.apache.spark.sql.execution.SQLExecution$.withNewExecutionId(SQLExecution.scala:57)
  at org.apache.spark.sql.Dataset.withNewExecutionId(Dataset.scala:2407)
  at org.apache.spark.sql.Dataset.org$apache$spark$sql$Dataset$$execute$1(Dataset.scala:2118)
  at org.apache.spark.sql.Dataset.org$apache$spark$sql$Dataset$$collect(Dataset.scala:2125)
  at org.apache.spark.sql.Dataset$$anonfun$head$1.apply(Dataset.scala:1859)
  at org.apache.spark.sql.Dataset$$anonfun$head$1.apply(Dataset.scala:1858)
  at org.apache.spark.sql.Dataset.withTypedCallback(Dataset.scala:2437)
  at org.apache.spark.sql.Dataset.head(Dataset.scala:1858)
  at org.apache.spark.sql.Dataset.take(Dataset.scala:2075)
  at org.apache.spark.sql.Dataset.showString(Dataset.scala:239)
  at org.apache.spark.sql.Dataset.show(Dataset.scala:530)
  at org.apache.spark.sql.Dataset.show(Dataset.scala:490)
  at org.apache.spark.sql.Dataset.show(Dataset.scala:499)
  ... 50 elided
Caused by: java.lang.NullPointerException
  at org.apache.spark.sql.catalyst.expressions.GeneratedClass$SpecificSafeProjection.apply(Unknown Source)
  at org.apache.spark.sql.catalyst.encoders.ExpressionEncoder.fromRow(ExpressionEncoder.scala:241)
  ... 72 more
{noformat}
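
One possible shape for a more explicit check, just a sketch under the assumption that validation happens before the encoder runs (the {{verifyNullability}} helper below is hypothetical, not an existing Spark API): walk each row against the schema and fail with a message that names the offending field instead of surfacing as a decoder NPE.

{code}
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.Row
import org.apache.spark.sql.types.StructType

// Hypothetical helper, not part of Spark: fail fast with a readable message
// when a row holds null in a field the schema declares as non-nullable.
def verifyNullability(rdd: RDD[Row], schema: StructType): RDD[Row] = {
  rdd.map { row =>
    schema.fields.zipWithIndex.foreach { case (field, ordinal) =>
      if (!field.nullable && row.isNullAt(ordinal)) {
        throw new RuntimeException(
          s"The ${ordinal}th field '${field.name}' of the input row cannot be null.")
      }
    }
    row
  }
}

// Hypothetical usage: validate before handing the RDD to createDataFrame so the
// failure above becomes a field-specific message.
// spark.createDataFrame(verifyNullability(rdd, schema), schema).show
{code}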

> RowEncoder needs to verify nullability in a more explicit way
> -------------------------------------------------------------
>
>                 Key: SPARK-15192
>                 URL: https://issues.apache.org/jira/browse/SPARK-15192
>             Project: Spark
>          Issue Type: Bug
>          Components: SQL
>            Reporter: Yin Huai
>
> When we create a Dataset from an RDD of rows with a specific schema, if the nullability of a value does not match the nullability defined in the schema, we will throw an exception that is not easy to understand.
> It will be good to verify the nullability in a more explicit way.
> {code}
> import org.apache.spark.sql.types._
> import org.apache.spark.sql.Row
> val schema = new StructType().add("a", StringType, false).add("b", StringType, false)
> val rdd = sc.parallelize(Row(null, "123") :: Row("234", null) :: Nil)
> spark.createDataFrame(rdd, schema).show
> {code}



--
This message was sent by Atlassian JIRA
(v6.3.4#6332)

---------------------------------------------------------------------
To unsubscribe, e-mail: issues-unsubscr...@spark.apache.org
For additional commands, e-mail: issues-h...@spark.apache.org