[ https://issues.apache.org/jira/browse/SPARK-11997?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel ]

Davies Liu resolved SPARK-11997.
--------------------------------
       Resolution: Fixed
    Fix Version/s: 1.6.0
                   2.0.0

Issue resolved by pull request 10001
[https://github.com/apache/spark/pull/10001]

> NPE when saving a DataFrame as Parquet partitioned by a long column
> -------------------------------------------------------------------
>
>                 Key: SPARK-11997
>                 URL: https://issues.apache.org/jira/browse/SPARK-11997
>             Project: Spark
>          Issue Type: Bug
>          Components: SQL
>    Affects Versions: 1.6.0
>            Reporter: Davies Liu
>            Priority: Blocker
>             Fix For: 2.0.0, 1.6.0
>
>
> {code}
> >>> sqlContext.range(1<<20).selectExpr("if(id % 10 = 0, null, (id % 111) - 50) AS n", "id").write.partitionBy("n").parquet("myid3")
> 15/11/25 12:05:57 ERROR InsertIntoHadoopFsRelation: Aborting job.
> java.lang.NullPointerException
>       at org.apache.spark.sql.catalyst.InternalRow.getString(InternalRow.scala:32)
>       at org.apache.spark.sql.sources.HadoopFsRelation$$anonfun$org$apache$spark$sql$sources$HadoopFsRelation$$castPartitionValuesToUserSchema$1$1.apply(interfaces.scala:610)
>       at org.apache.spark.sql.sources.HadoopFsRelation$$anonfun$org$apache$spark$sql$sources$HadoopFsRelation$$castPartitionValuesToUserSchema$1$1.apply(interfaces.scala:608)
>       at scala.collection.TraversableLike$$anonfun$map$1.apply(TraversableLike.scala:244)
>       at scala.collection.TraversableLike$$anonfun$map$1.apply(TraversableLike.scala:244)
>       at scala.collection.immutable.Range.foreach(Range.scala:141)
>       at scala.collection.TraversableLike$class.map(TraversableLike.scala:244)
>       at scala.collection.AbstractTraversable.map(Traversable.scala:105)
>       at org.apache.spark.sql.sources.HadoopFsRelation.org$apache$spark$sql$sources$HadoopFsRelation$$castPartitionValuesToUserSchema$1(interfaces.scala:608)
>       at org.apache.spark.sql.sources.HadoopFsRelation$$anonfun$org$apache$spark$sql$sources$HadoopFsRelation$$discoverPartitions$1.apply(interfaces.scala:616)
>       at org.apache.spark.sql.sources.HadoopFsRelation$$anonfun$org$apache$spark$sql$sources$HadoopFsRelation$$discoverPartitions$1.apply(interfaces.scala:615)
>       at scala.collection.TraversableLike$$anonfun$map$1.apply(TraversableLike.scala:244)
>       at scala.collection.TraversableLike$$anonfun$map$1.apply(TraversableLike.scala:244)
>       at scala.collection.mutable.ResizableArray$class.foreach(ResizableArray.scala:59)
>       at scala.collection.mutable.ArrayBuffer.foreach(ArrayBuffer.scala:47)
>       at scala.collection.TraversableLike$class.map(TraversableLike.scala:244)
>       at scala.collection.AbstractTraversable.map(Traversable.scala:105)
>       at org.apache.spark.sql.sources.HadoopFsRelation.org$apache$spark$sql$sources$HadoopFsRelation$$discoverPartitions(interfaces.scala:615)
>       at org.apache.spark.sql.sources.HadoopFsRelation.refresh(interfaces.scala:590)
>       at org.apache.spark.sql.execution.datasources.parquet.ParquetRelation.refresh(ParquetRelation.scala:204)
>       at org.apache.spark.sql.execution.datasources.InsertIntoHadoopFsRelation$$anonfun$run$1.apply$mcV$sp(InsertIntoHadoopFsRelation.scala:152)
>       at org.apache.spark.sql.execution.datasources.InsertIntoHadoopFsRelation$$anonfun$run$1.apply(InsertIntoHadoopFsRelation.scala:108)
>       at org.apache.spark.sql.execution.datasources.InsertIntoHadoopFsRelation$$anonfun$run$1.apply(InsertIntoHadoopFsRelation.scala:108)
>       at org.apache.spark.sql.execution.SQLExecution$.withNewExecutionId(SQLExecution.scala:56)
>       at org.apache.spark.sql.execution.datasources.InsertIntoHadoopFsRelation.run(InsertIntoHadoopFsRelation.scala:108)
>       at org.apache.spark.sql.execution.ExecutedCommand.sideEffectResult$lzycompute(commands.scala:58)
>       at org.apache.spark.sql.execution.ExecutedCommand.sideEffectResult(commands.scala:56)
>       at org.apache.spark.sql.execution.ExecutedCommand.doExecute(commands.scala:70)
>       at org.apache.spark.sql.execution.SparkPlan$$anonfun$execute$5.apply(SparkPlan.scala:133)
>       at org.apache.spark.sql.execution.SparkPlan$$anonfun$execute$5.apply(SparkPlan.scala:131)
>       at org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:150)
>       at org.apache.spark.sql.execution.SparkPlan.execute(SparkPlan.scala:131)
>       at org.apache.spark.sql.execution.QueryExecution.toRdd$lzycompute(QueryExecution.scala:56)
>       at org.apache.spark.sql.execution.QueryExecution.toRdd(QueryExecution.scala:56)
>       at org.apache.spark.sql.execution.datasources.ResolvedDataSource$.apply(ResolvedDataSource.scala:242)
>       at org.apache.spark.sql.DataFrameWriter.save(DataFrameWriter.scala:148)
>       at org.apache.spark.sql.DataFrameWriter.save(DataFrameWriter.scala:139)
>       at org.apache.spark.sql.DataFrameWriter.parquet(DataFrameWriter.scala:329)
>       at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
>       at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
>       at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
>       at java.lang.reflect.Method.invoke(Method.java:606)
>       at py4j.reflection.MethodInvoker.invoke(MethodInvoker.java:231)
>       at py4j.reflection.ReflectionEngine.invoke(ReflectionEngine.java:381)
>       at py4j.Gateway.invoke(Gateway.java:259)
>       at py4j.commands.AbstractCommand.invokeMethod(AbstractCommand.java:133)
>       at py4j.commands.CallCommand.execute(CallCommand.java:79)
>       at py4j.GatewayConnection.run(GatewayConnection.java:209)
>       at java.lang.Thread.run(Thread.java:745)
> {code}
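>
> Judging from the stack trace, the NPE is raised when partition discovery calls {{InternalRow.getString}} while casting the discovered partition values back to the user schema, and the repro above deliberately writes null values into the long partition column {{n}}. A minimal sketch of a possible workaround, assuming the crash is triggered only by those nulls (the sentinel value -999 below is arbitrary and hypothetical):
> {code}
> df = sqlContext.range(1 << 20).selectExpr(
>     "if(id % 10 = 0, null, (id % 111) - 50) AS n", "id")
> # Replace nulls in the partition column with a sentinel before writing,
> # so partition discovery never has to cast a null value.
> df.na.fill({"n": -999}).write.partitionBy("n").parquet("myid3")
> {code}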


