[ https://issues.apache.org/jira/browse/SPARK-5303?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel ]

Mauro Pirrone closed SPARK-5303.
--------------------------------
    Resolution: Not a Problem

> applySchema throws NullPointerException
> ---------------------------------------
>
>                 Key: SPARK-5303
>                 URL: https://issues.apache.org/jira/browse/SPARK-5303
>             Project: Spark
>          Issue Type: Bug
>          Components: Spark Core
>    Affects Versions: 1.2.0
>            Reporter: Mauro Pirrone
>
> The following code snippet throws a NullPointerException:
> val result = .....
>       
> val rows = result.take(10)
> val rowRdd = SparkManager.getContext().parallelize(rows, 1)
> val schemaRdd = SparkManager.getSQLContext().applySchema(rowRdd, result.schema)
>
> java.lang.NullPointerException
>       at org.apache.spark.sql.catalyst.expressions.AttributeReference.hashCode(namedExpressions.scala:147)
>       at scala.runtime.ScalaRunTime$.hash(ScalaRunTime.scala:210)
>       at scala.util.hashing.MurmurHash3.listHash(MurmurHash3.scala:168)
>       at scala.util.hashing.MurmurHash3$.seqHash(MurmurHash3.scala:216)
>       at scala.collection.LinearSeqLike$class.hashCode(LinearSeqLike.scala:53)
>       at scala.collection.immutable.List.hashCode(List.scala:84)
>       at scala.runtime.ScalaRunTime$.hash(ScalaRunTime.scala:210)
>       at scala.util.hashing.MurmurHash3.productHash(MurmurHash3.scala:63)
>       at scala.util.hashing.MurmurHash3$.productHash(MurmurHash3.scala:210)
>       at scala.runtime.ScalaRunTime$._hashCode(ScalaRunTime.scala:172)
>       at org.apache.spark.sql.execution.LogicalRDD.hashCode(ExistingRDD.scala:58)
>       at scala.runtime.ScalaRunTime$.hash(ScalaRunTime.scala:210)
>       at scala.collection.mutable.HashTable$HashUtils$class.elemHashCode(HashTable.scala:398)
>       at scala.collection.mutable.HashMap.elemHashCode(HashMap.scala:39)
>       at scala.collection.mutable.HashTable$class.findEntry(HashTable.scala:130)
>       at scala.collection.mutable.HashMap.findEntry(HashMap.scala:39)
>       at scala.collection.mutable.HashMap.get(HashMap.scala:69)
>       at scala.collection.mutable.MapLike$class.getOrElseUpdate(MapLike.scala:187)
>       at scala.collection.mutable.AbstractMap.getOrElseUpdate(Map.scala:91)
>       at scala.collection.TraversableLike$$anonfun$groupBy$1.apply(TraversableLike.scala:329)
>       at scala.collection.TraversableLike$$anonfun$groupBy$1.apply(TraversableLike.scala:327)
>       at scala.collection.mutable.ResizableArray$class.foreach(ResizableArray.scala:59)
>       at scala.collection.mutable.ArrayBuffer.foreach(ArrayBuffer.scala:47)
>       at scala.collection.TraversableLike$class.groupBy(TraversableLike.scala:327)
>       at scala.collection.AbstractTraversable.groupBy(Traversable.scala:105)
>       at org.apache.spark.sql.catalyst.analysis.NewRelationInstances$.apply(MultiInstanceRelation.scala:44)
>       at org.apache.spark.sql.catalyst.analysis.NewRelationInstances$.apply(MultiInstanceRelation.scala:40)
>       at org.apache.spark.sql.catalyst.rules.RuleExecutor$$anonfun$apply$1$$anonfun$apply$2.apply(RuleExecutor.scala:61)
>       at org.apache.spark.sql.catalyst.rules.RuleExecutor$$anonfun$apply$1$$anonfun$apply$2.apply(RuleExecutor.scala:59)
>       at scala.collection.IndexedSeqOptimized$class.foldl(IndexedSeqOptimized.scala:51)
>       at scala.collection.IndexedSeqOptimized$class.foldLeft(IndexedSeqOptimized.scala:60)
>       at scala.collection.mutable.WrappedArray.foldLeft(WrappedArray.scala:34)
>       at org.apache.spark.sql.catalyst.rules.RuleExecutor$$anonfun$apply$1.apply(RuleExecutor.scala:59)
>       at org.apache.spark.sql.catalyst.rules.RuleExecutor$$anonfun$apply$1.apply(RuleExecutor.scala:51)
>       at scala.collection.immutable.List.foreach(List.scala:318)
>       at org.apache.spark.sql.catalyst.rules.RuleExecutor.apply(RuleExecutor.scala:51)
>       at org.apache.spark.sql.SQLContext$QueryExecution.analyzed$lzycompute(SQLContext.scala:411)
>       at org.apache.spark.sql.SQLContext$QueryExecution.analyzed(SQLContext.scala:411)
>       at org.apache.spark.sql.SchemaRDD.schema$lzycompute(SchemaRDD.scala:135)
>       at org.apache.spark.sql.SchemaRDD.schema(SchemaRDD.scala:135)
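For reference, the reported snippet goes through a custom SparkManager wrapper and elides how `result` is built, so it is not runnable as given. A minimal, self-contained sketch of the same pattern against the Spark 1.2 API might look like the following; the Record case class, local-mode setup, and plain SparkContext/SQLContext are illustrative stand-ins, not part of the original report:

    import org.apache.spark.{SparkConf, SparkContext}
    import org.apache.spark.sql.{SQLContext, SchemaRDD}

    // Stand-in for the reporter's elided `result`; any case class works here.
    case class Record(id: Int, name: String)

    object Spark5303Sketch {
      def main(args: Array[String]): Unit = {
        val sc = new SparkContext(
          new SparkConf().setAppName("SPARK-5303-sketch").setMaster("local[1]"))
        val sqlContext = new SQLContext(sc)
        import sqlContext.createSchemaRDD

        // Build a small SchemaRDD to play the role of `result`
        // (the implicit createSchemaRDD converts the RDD of case classes).
        val result: SchemaRDD = sc.parallelize(Seq(Record(1, "a"), Record(2, "b")))

        // The reported pattern: take a few rows locally, re-parallelize them,
        // and re-apply the original schema.
        val rows = result.take(10)
        val rowRdd = sc.parallelize(rows, 1)
        val schemaRdd = sqlContext.applySchema(rowRdd, result.schema)

        schemaRdd.collect().foreach(println)
        sc.stop()
      }
    }

In a plain local setup like this, the pattern is expected to work; whether the NPE reproduces depends on how the elided `result` was originally constructed. The bottom frames of the trace (SchemaRDD.schema -> analyzed$lzycompute -> NewRelationInstances -> AttributeReference.hashCode) show the exception being raised while `.schema` forces analysis of the existing plan, which points at the construction of `result` rather than at applySchema itself and is consistent with the "Not a Problem" resolution.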


