[ https://issues.apache.org/jira/browse/SPARK-30229?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=16998247#comment-16998247 ]

Ankit Raj Boudh commented on SPARK-30229:
-----------------------------------------

Please provide the operations you performed so that I can reproduce this issue.
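
From the stack trace, the failure happens while Dataset.collectAsList() runs on a coalesced result inside a foreachRDD batch (FinanceSparkStreaming.java:147/191): CoalescedRDD.getPartitions asks DefaultPartitionCoalescer for preferred locations, and SparkContext.getPreferredLocs throws the NPE. Below is a rough, hypothetical sketch of a job with that shape so you can confirm or correct it; the class name, schema, queue-based source, and 2-minute batch interval are my assumptions, not taken from your actual code.

{code:java}
// Hypothetical reproduction sketch for SPARK-30229 -- class name, schema, and the
// queue-based source are assumptions; the reporter's real job (FinanceSparkStreaming)
// presumably reads from an external source. The shape follows the stack trace:
// foreachRDD -> temp view -> SQL -> coalesce -> collectAsList.
import java.util.Arrays;
import java.util.LinkedList;
import java.util.Queue;

import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Row;
import org.apache.spark.sql.RowFactory;
import org.apache.spark.sql.SparkSession;
import org.apache.spark.sql.types.DataTypes;
import org.apache.spark.sql.types.StructType;
import org.apache.spark.streaming.Durations;
import org.apache.spark.streaming.api.java.JavaDStream;
import org.apache.spark.streaming.api.java.JavaStreamingContext;

public class Spark30229ReproSketch {

  public static void main(String[] args) throws InterruptedException {
    SparkConf conf = new SparkConf().setAppName("SPARK-30229-repro").setMaster("local[2]");
    SparkSession spark = SparkSession.builder().config(conf).getOrCreate();
    JavaSparkContext jsc = JavaSparkContext.fromSparkContext(spark.sparkContext());
    // The 11:52 / 11:54 timestamps in the log suggest a 2-minute batch interval.
    JavaStreamingContext jssc = new JavaStreamingContext(jsc, Durations.minutes(2));

    // Queue-backed stand-in source; the real input source is unknown.
    Queue<JavaRDD<String>> queue = new LinkedList<>();
    queue.add(jsc.parallelize(Arrays.asList("e1", "e2", "e3")));
    JavaDStream<String> events = jssc.queueStream(queue);

    StructType schema = new StructType().add("event", DataTypes.StringType);

    events.foreachRDD(rdd -> {
      Dataset<Row> df = spark.createDataFrame(rdd.map(s -> RowFactory.create(s)), schema);
      df.createOrReplaceTempView("event_detail_temp");

      // collectAsList() on a coalesced Dataset drives the call path in the trace:
      // CoalescedRDD.getPartitions -> DefaultPartitionCoalescer.coalesce ->
      // SparkContext.getPreferredLocs, where the NullPointerException is thrown.
      spark.sql("SELECT event FROM event_detail_temp")
           .coalesce(1)
           .collectAsList();
    });

    jssc.start();
    jssc.awaitTerminationOrTimeout(5 * 60 * 1000L);
    jssc.stop(false, true);
  }
}
{code}

If your job differs from this shape, please paste the relevant code around FinanceSparkStreaming.java:147-191 and the full driver log around the first failed batch.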

> java.lang.NullPointerException at org.apache.spark.SparkContext.getPreferredLocs(SparkContext.scala:1783)
> -----------------------------------------------------------------------------------------------------------------
>
>                 Key: SPARK-30229
>                 URL: https://issues.apache.org/jira/browse/SPARK-30229
>             Project: Spark
>          Issue Type: Bug
>          Components: Spark Core
>    Affects Versions: 2.2.1
>            Reporter: SeaAndHill
>            Priority: Major
>
> 2019-12-12 11:52:00 INFO JobScheduler:54 - Added jobs for time 1576122720000 ms
> 2019-12-12 11:52:00 INFO JobScheduler:54 - Starting job streaming job 1576122720000 ms.0 from job set of time 1576122720000 ms
> 2019-12-12 11:52:00 INFO CarbonSparkSqlParser:54 - Parsing command: event_detail_temp
> 2019-12-12 11:52:00 INFO CarbonLateDecodeRule:95 - skip CarbonOptimizer
> 2019-12-12 11:52:00 INFO CarbonLateDecodeRule:72 - Skip CarbonOptimizer
> 2019-12-12 11:52:00 INFO CarbonLateDecodeRule:95 - skip CarbonOptimizer
> 2019-12-12 11:52:00 INFO CarbonLateDecodeRule:72 - Skip CarbonOptimizer
> 2019-12-12 11:52:00 INFO JobScheduler:54 - Finished job streaming job 1576122720000 ms.0 from job set of time 1576122720000 ms
> 2019-12-12 11:52:00 ERROR JobScheduler:91 - Error running job streaming job 1576122720000 ms.0
> java.lang.NullPointerException
>  at org.apache.spark.SparkContext.getPreferredLocs(SparkContext.scala:1783)
>  at org.apache.spark.rdd.DefaultPartitionCoalescer.currPrefLocs(CoalescedRDD.scala:178)
>  at org.apache.spark.rdd.DefaultPartitionCoalescer$PartitionLocations$$anonfun$getAllPrefLocs$2.apply(CoalescedRDD.scala:196)
>  at org.apache.spark.rdd.DefaultPartitionCoalescer$PartitionLocations$$anonfun$getAllPrefLocs$2.apply(CoalescedRDD.scala:195)
>  at scala.collection.IndexedSeqOptimized$class.foreach(IndexedSeqOptimized.scala:33)
>  at scala.collection.mutable.ArrayOps$ofRef.foreach(ArrayOps.scala:186)
>  at org.apache.spark.rdd.DefaultPartitionCoalescer$PartitionLocations.getAllPrefLocs(CoalescedRDD.scala:195)
>  at org.apache.spark.rdd.DefaultPartitionCoalescer$PartitionLocations.<init>(CoalescedRDD.scala:188)
>  at org.apache.spark.rdd.DefaultPartitionCoalescer.coalesce(CoalescedRDD.scala:391)
>  at org.apache.spark.rdd.CoalescedRDD.getPartitions(CoalescedRDD.scala:91)
>  at org.apache.spark.rdd.RDD$$anonfun$partitions$2.apply(RDD.scala:252)
>  at org.apache.spark.rdd.RDD$$anonfun$partitions$2.apply(RDD.scala:250)
>  at scala.Option.getOrElse(Option.scala:121)
>  at org.apache.spark.rdd.RDD.partitions(RDD.scala:250)
>  at org.apache.spark.rdd.MapPartitionsRDD.getPartitions(MapPartitionsRDD.scala:35)
>  at org.apache.spark.rdd.RDD$$anonfun$partitions$2.apply(RDD.scala:252)
>  at org.apache.spark.rdd.RDD$$anonfun$partitions$2.apply(RDD.scala:250)
>  at scala.Option.getOrElse(Option.scala:121)
>  at org.apache.spark.rdd.RDD.partitions(RDD.scala:250)
>  at org.apache.spark.rdd.MapPartitionsRDD.getPartitions(MapPartitionsRDD.scala:35)
>  at org.apache.spark.rdd.RDD$$anonfun$partitions$2.apply(RDD.scala:252)
>  at org.apache.spark.rdd.RDD$$anonfun$partitions$2.apply(RDD.scala:250)
>  at scala.Option.getOrElse(Option.scala:121)
>  at org.apache.spark.rdd.RDD.partitions(RDD.scala:250)
>  at org.apache.spark.rdd.MapPartitionsRDD.getPartitions(MapPartitionsRDD.scala:35)
>  at org.apache.spark.rdd.RDD$$anonfun$partitions$2.apply(RDD.scala:252)
>  at org.apache.spark.rdd.RDD$$anonfun$partitions$2.apply(RDD.scala:250)
>  at scala.Option.getOrElse(Option.scala:121)
>  at org.apache.spark.rdd.RDD.partitions(RDD.scala:250)
>  at org.apache.spark.rdd.MapPartitionsRDD.getPartitions(MapPartitionsRDD.scala:35)
>  at org.apache.spark.rdd.RDD$$anonfun$partitions$2.apply(RDD.scala:252)
>  at org.apache.spark.rdd.RDD$$anonfun$partitions$2.apply(RDD.scala:250)
>  at scala.Option.getOrElse(Option.scala:121)
>  at org.apache.spark.rdd.RDD.partitions(RDD.scala:250)
>  at org.apache.spark.rdd.MapPartitionsRDD.getPartitions(MapPartitionsRDD.scala:35)
>  at org.apache.spark.rdd.RDD$$anonfun$partitions$2.apply(RDD.scala:252)
>  at org.apache.spark.rdd.RDD$$anonfun$partitions$2.apply(RDD.scala:250)
>  at scala.Option.getOrElse(Option.scala:121)
>  at org.apache.spark.rdd.RDD.partitions(RDD.scala:250)
>  at org.apache.spark.SparkContext.runJob(SparkContext.scala:2094)
>  at org.apache.spark.rdd.RDD$$anonfun$collect$1.apply(RDD.scala:936)
>  at org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:151)
>  at org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:112)
>  at org.apache.spark.rdd.RDD.withScope(RDD.scala:362)
>  at org.apache.spark.rdd.RDD.collect(RDD.scala:935)
>  at org.apache.spark.sql.execution.SparkPlan.executeCollect(SparkPlan.scala:278)
>  at org.apache.spark.sql.Dataset.org$apache$spark$sql$Dataset$$collectFromPlan(Dataset.scala:2861)
>  at org.apache.spark.sql.Dataset$$anonfun$collectAsList$1.apply(Dataset.scala:2399)
>  at org.apache.spark.sql.Dataset$$anonfun$collectAsList$1.apply(Dataset.scala:2398)
>  at org.apache.spark.sql.Dataset$$anonfun$55.apply(Dataset.scala:2842)
>  at org.apache.spark.sql.execution.SQLExecution$.withNewExecutionId(SQLExecution.scala:65)
>  at org.apache.spark.sql.Dataset.withAction(Dataset.scala:2841)
>  at org.apache.spark.sql.Dataset.collectAsList(Dataset.scala:2398)
>  at com.talkingdata.aeplus.analytics.FinanceSparkStreaming$1.call(FinanceSparkStreaming.java:191)
>  at com.talkingdata.aeplus.analytics.FinanceSparkStreaming$1.call(FinanceSparkStreaming.java:147)
>  at org.apache.spark.streaming.api.java.JavaDStreamLike$$anonfun$foreachRDD$1.apply(JavaDStreamLike.scala:272)
>  at org.apache.spark.streaming.api.java.JavaDStreamLike$$anonfun$foreachRDD$1.apply(JavaDStreamLike.scala:272)
>  at org.apache.spark.streaming.dstream.DStream$$anonfun$foreachRDD$1$$anonfun$apply$mcV$sp$3.apply(DStream.scala:628)
>  at org.apache.spark.streaming.dstream.DStream$$anonfun$foreachRDD$1$$anonfun$apply$mcV$sp$3.apply(DStream.scala:628)
>  at org.apache.spark.streaming.dstream.ForEachDStream$$anonfun$1$$anonfun$apply$mcV$sp$1.apply$mcV$sp(ForEachDStream.scala:51)
>  at org.apache.spark.streaming.dstream.ForEachDStream$$anonfun$1$$anonfun$apply$mcV$sp$1.apply(ForEachDStream.scala:51)
>  at org.apache.spark.streaming.dstream.ForEachDStream$$anonfun$1$$anonfun$apply$mcV$sp$1.apply(ForEachDStream.scala:51)
>  at org.apache.spark.streaming.dstream.DStream.createRDDWithLocalProperties(DStream.scala:416)
>  at org.apache.spark.streaming.dstream.ForEachDStream$$anonfun$1.apply$mcV$sp(ForEachDStream.scala:50)
>  at org.apache.spark.streaming.dstream.ForEachDStream$$anonfun$1.apply(ForEachDStream.scala:50)
>  at org.apache.spark.streaming.dstream.ForEachDStream$$anonfun$1.apply(ForEachDStream.scala:50)
>  at scala.util.Try$.apply(Try.scala:192)
>  at org.apache.spark.streaming.scheduler.Job.run(Job.scala:39)
>  at org.apache.spark.streaming.scheduler.JobScheduler$JobHandler$$anonfun$run$1.apply$mcV$sp(JobScheduler.scala:257)
>  at org.apache.spark.streaming.scheduler.JobScheduler$JobHandler$$anonfun$run$1.apply(JobScheduler.scala:257)
>  at org.apache.spark.streaming.scheduler.JobScheduler$JobHandler$$anonfun$run$1.apply(JobScheduler.scala:257)
>  at scala.util.DynamicVariable.withValue(DynamicVariable.scala:58)
>  at org.apache.spark.streaming.scheduler.JobScheduler$JobHandler.run(JobScheduler.scala:256)
>  at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142)
>  at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617)
>  at java.lang.Thread.run(Thread.java:748)
> 2019-12-12 11:54:00 INFO JobScheduler:54 - Added jobs for time 1576122840000 ms
> 2019-12-12 11:54:00 INFO JobScheduler:54 - Starting job streaming job 1576122840000 ms.0 from job set of time 1576122840000 ms
> 2019-12-12 11:54:00 INFO CarbonSparkSqlParser:54 - Parsing command: event_detail_temp
> 2019-12-12 11:54:00 INFO CarbonLateDecodeRule:95 - skip CarbonOptimizer
> 2019-12-12 11:54:00 INFO CarbonLateDecodeRule:72 - Skip CarbonOptimizer
> 2019-12-12 11:54:00 INFO CarbonLateDecodeRule:95 - skip CarbonOptimizer
> 2019-12-12 11:54:00 INFO CarbonLateDecodeRule:72 - Skip CarbonOptimizer
> 2019-12-12 11:54:00 INFO JobScheduler:54 - Finished job streaming job 1576122840000 ms.0 from job set of time 1576122840000 ms
> 2019-12-12 11:54:00 ERROR JobScheduler:91 - Error running job streaming job 1576122840000 ms.0
> java.lang.NullPointerException
>  at org.apache.spark.SparkContext.getPreferredLocs(SparkContext.scala:1783)
>  ... (remaining frames identical to the stack trace above)


