[ https://issues.apache.org/jira/browse/SPARK-18859?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=15832516#comment-15832516 ]

Erik LaBianca edited comment on SPARK-18859 at 1/20/17 10:33 PM:
-----------------------------------------------------------------

Not quite a repro, but here's the explain output.

{noformat}
val df = spark.read
      .format("jdbc")
      .options(DataSources.PostgresOptions + (
          "dbtable" -> s"""(select
                 profiles_contact_points.id,
                 remote_id
          |from profiles_contact_points
          |  left join profiles_contacts_connectors
          |    on profiles_contact_points.contact_id = profiles_contacts_connectors.contact_id
          | where profiles_contact_points.user_id = 1
        ) t""".stripMargin
        ))
      .load

df.printSchema()
df.explain(true)
df.map(_ != null).count()
{noformat}

Running it results in the following output.
{noformat}
df: org.apache.spark.sql.DataFrame = [id: bigint, remote_id: string]
root
 |-- id: long (nullable = false)
 |-- remote_id: string (nullable = false)
== Parsed Logical Plan ==
Relation[id#2018L,remote_id#2019] JDBCRelation((select
                 profiles_contact_points.id,
                 remote_id
from profiles_contact_points
  left join profiles_contacts_connectors
    on profiles_contact_points.contact_id = profiles_contacts_connectors.contact_id
 where profiles_contact_points.user_id = 1
        ) t)
== Analyzed Logical Plan ==
id: bigint, remote_id: string
Relation[id#2018L,remote_id#2019] JDBCRelation((select
                 profiles_contact_points.id,
                 remote_id
from profiles_contact_points
  left join profiles_contacts_connectors
    on profiles_contact_points.contact_id = profiles_contacts_connectors.contact_id
 where profiles_contact_points.user_id = 1
        ) t)
== Optimized Logical Plan ==
Relation[id#2018L,remote_id#2019] JDBCRelation((select
                 profiles_contact_points.id,
                 remote_id
from profiles_contact_points
  left join profiles_contacts_connectors
    on profiles_contact_points.contact_id = profiles_contacts_connectors.contact_id
 where profiles_contact_points.user_id = 1
        ) t)
== Physical Plan ==
*Scan JDBCRelation((select
                 profiles_contact_points.id,
                 remote_id
from profiles_contact_points
  left join profiles_contacts_connectors
    on profiles_contact_points.contact_id = profiles_contacts_connectors.contact_id
 where profiles_contact_points.user_id = 1
        ) t) [id#2018L,remote_id#2019] ReadSchema: struct<id:bigint,remote_id:string>
org.apache.spark.SparkException: Job aborted due to stage failure: Task 0 in stage 238.0 failed 4 times, most recent failure: Lost task 0.3 in stage 238.0 (TID 55547, ip-x.ec2.internal): java.lang.NullPointerException
        at org.apache.spark.sql.catalyst.expressions.GeneratedClass$GeneratedIterator.agg_doAggregateWithoutKey$(Unknown Source)
        at org.apache.spark.sql.catalyst.expressions.GeneratedClass$GeneratedIterator.processNext(Unknown Source)
        at org.apache.spark.sql.execution.BufferedRowIterator.hasNext(BufferedRowIterator.java:43)
        at org.apache.spark.sql.execution.WholeStageCodegenExec$$anonfun$8$$anon$1.hasNext(WholeStageCodegenExec.scala:370)
        at scala.collection.Iterator$$anon$11.hasNext(Iterator.scala:408)
        at org.apache.spark.shuffle.sort.BypassMergeSortShuffleWriter.write(BypassMergeSortShuffleWriter.java:125)
        at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:79)
        at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:47)
        at org.apache.spark.scheduler.Task.run(Task.scala:86)
        at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:274)
        at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142)
        at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617)
        at java.lang.Thread.run(Thread.java:745)
Driver stacktrace:
  at org.apache.spark.scheduler.DAGScheduler.org$apache$spark$scheduler$DAGScheduler$$failJobAndIndependentStages(DAGScheduler.scala:1454)
  at org.apache.spark.scheduler.DAGScheduler$$anonfun$abortStage$1.apply(DAGScheduler.scala:1442)
  at org.apache.spark.scheduler.DAGScheduler$$anonfun$abortStage$1.apply(DAGScheduler.scala:1441)
  at scala.collection.mutable.ResizableArray$class.foreach(ResizableArray.scala:59)
  at scala.collection.mutable.ArrayBuffer.foreach(ArrayBuffer.scala:48)
  at org.apache.spark.scheduler.DAGScheduler.abortStage(DAGScheduler.scala:1441)
  at org.apache.spark.scheduler.DAGScheduler$$anonfun$handleTaskSetFailed$1.apply(DAGScheduler.scala:811)
  at org.apache.spark.scheduler.DAGScheduler$$anonfun$handleTaskSetFailed$1.apply(DAGScheduler.scala:811)
  at scala.Option.foreach(Option.scala:257)
  at org.apache.spark.scheduler.DAGScheduler.handleTaskSetFailed(DAGScheduler.scala:811)
  at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.doOnReceive(DAGScheduler.scala:1667)
  at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:1622)
  at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:1611)
  at org.apache.spark.util.EventLoop$$anon$1.run(EventLoop.scala:48)
  at org.apache.spark.scheduler.DAGScheduler.runJob(DAGScheduler.scala:632)
  at org.apache.spark.SparkContext.runJob(SparkContext.scala:1873)
  at org.apache.spark.SparkContext.runJob(SparkContext.scala:1886)
  at org.apache.spark.SparkContext.runJob(SparkContext.scala:1899)
  at org.apache.spark.SparkContext.runJob(SparkContext.scala:1913)
  at org.apache.spark.rdd.RDD$$anonfun$collect$1.apply(RDD.scala:912)
  at org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:151)
  at org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:112)
  at org.apache.spark.rdd.RDD.withScope(RDD.scala:358)
  at org.apache.spark.rdd.RDD.collect(RDD.scala:911)
  at org.apache.spark.sql.execution.SparkPlan.executeCollect(SparkPlan.scala:290)
  at org.apache.spark.sql.Dataset$$anonfun$org$apache$spark$sql$Dataset$$execute$1$1.apply(Dataset.scala:2193)
  at org.apache.spark.sql.execution.SQLExecution$.withNewExecutionId(SQLExecution.scala:57)
  at org.apache.spark.sql.Dataset.withNewExecutionId(Dataset.scala:2546)
  at org.apache.spark.sql.Dataset.org$apache$spark$sql$Dataset$$execute$1(Dataset.scala:2192)
  at org.apache.spark.sql.Dataset.org$apache$spark$sql$Dataset$$collect(Dataset.scala:2199)
  at org.apache.spark.sql.Dataset$$anonfun$count$1.apply(Dataset.scala:2227)
  at org.apache.spark.sql.Dataset$$anonfun$count$1.apply(Dataset.scala:2226)
  at org.apache.spark.sql.Dataset.withCallback(Dataset.scala:2559)
  at org.apache.spark.sql.Dataset.count(Dataset.scala:2226)
  ... 170 elided
Caused by: java.lang.NullPointerException
  at org.apache.spark.sql.catalyst.expressions.GeneratedClass$GeneratedIterator.agg_doAggregateWithoutKey$(Unknown Source)
  at org.apache.spark.sql.catalyst.expressions.GeneratedClass$GeneratedIterator.processNext(Unknown Source)
  at org.apache.spark.sql.execution.BufferedRowIterator.hasNext(BufferedRowIterator.java:43)
  at org.apache.spark.sql.execution.WholeStageCodegenExec$$anonfun$8$$anon$1.hasNext(WholeStageCodegenExec.scala:370)
  at scala.collection.Iterator$$anon$11.hasNext(Iterator.scala:408)
  at org.apache.spark.shuffle.sort.BypassMergeSortShuffleWriter.write(BypassMergeSortShuffleWriter.java:125)
  at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:79)
  at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:47)
  at org.apache.spark.scheduler.Task.run(Task.scala:86)
  at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:274)
  ... 3 more
{noformat}
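
In case it helps with triage: the query above needs our Postgres instance, but a JDBC-free sketch of what I believe is the same mismatch would be to hand Spark a schema that claims {{nullable = false}} and data that contains nulls anyway. Completely untested, the names are made up, and I haven't confirmed it hits the exact same codegen path:

{noformat}
import org.apache.spark.sql.Row
import org.apache.spark.sql.types.{LongType, StringType, StructField, StructType}
import spark.implicits._   // already in scope in spark-shell

// Schema that (wrongly) promises remote_id is never null, mirroring what the
// JDBC relation reports for a NOT NULL column sitting behind a LEFT JOIN.
val schema = StructType(Seq(
  StructField("id", LongType, nullable = false),
  StructField("remote_id", StringType, nullable = false)))

// Data that violates the declared nullability.
val rows = spark.sparkContext.parallelize(Seq(Row(1L, "a"), Row(2L, null)))
val df2 = spark.createDataFrame(rows, schema)

// Whole-stage codegen trusts nullable = false, so this may NPE the same way.
df2.map(_ != null).count()
{noformat}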


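A possible workaround while this is open (untested sketch; it goes through the RDD, so it gives up pushdown on the JDBC side) is to copy the inferred schema with every field flipped to nullable before mapping over the DataFrame:

{noformat}
import org.apache.spark.sql.DataFrame
import org.apache.spark.sql.types.StructType

// Hypothetical helper, not part of Spark: relax a DataFrame's schema so every
// column is treated as nullable, sidestepping the bogus nullable = false flags.
def withNullableSchema(df: DataFrame): DataFrame = {
  val relaxed = StructType(df.schema.fields.map(_.copy(nullable = true)))
  df.sparkSession.createDataFrame(df.rdd, relaxed)
}

val safeDf = withNullableSchema(df)
safeDf.map(_ != null).count()   // should no longer trip the non-null fast path
{noformat}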

> Catalyst codegen does not mark column as nullable when it should. Causes NPE
> ----------------------------------------------------------------------------
>
>                 Key: SPARK-18859
>                 URL: https://issues.apache.org/jira/browse/SPARK-18859
>             Project: Spark
>          Issue Type: Bug
>          Components: Optimizer, SQL
>    Affects Versions: 2.0.2
>            Reporter: Mykhailo Osypov
>            Priority: Critical
>
> When joining two tables via LEFT JOIN, columns from the right table may be NULL; however, Catalyst codegen does not recognize this.
> Example:
> {code:title=schema.sql|borderStyle=solid}
> create table masterdata.testtable(
>   id int not null,
>   age int
> );
> create table masterdata.jointable(
>   id int not null,
>   name text not null
> );
> {code}
> {code:title=query_to_select.sql|borderStyle=solid}
> (select t.id, t.age, j.name from masterdata.testtable t left join 
> masterdata.jointable j on t.id = j.id) as testtable;
> {code}
> {code:title=master code|borderStyle=solid}
> val df = sqlContext
>       .read
>       .format("jdbc")
>       .option("dbTable", "query to select")
>       ....
>       .load
> //df generated schema
> /*
> root
>  |-- id: integer (nullable = false)
>  |-- age: integer (nullable = true)
>  |-- name: string (nullable = false)
> */
> {code}
> {code:title=Codegen|borderStyle=solid}
> /* 038 */       scan_rowWriter.write(0, scan_value);
> /* 039 */
> /* 040 */       if (scan_isNull1) {
> /* 041 */         scan_rowWriter.setNullAt(1);
> /* 042 */       } else {
> /* 043 */         scan_rowWriter.write(1, scan_value1);
> /* 044 */       }
> /* 045 */
> /* 046 */       scan_rowWriter.write(2, scan_value2);
> {code}
> Since *j.name* comes from the right table of the *left join* query, it may be null. However, the generated schema doesn't reflect that (probably because the column is defined as *name text not null*).
> {code:title=StackTrace|borderStyle=solid}
> java.lang.NullPointerException
>       at org.apache.spark.sql.catalyst.expressions.codegen.UnsafeRowWriter.write(UnsafeRowWriter.java:210)
>       at org.apache.spark.sql.catalyst.expressions.GeneratedClass$GeneratedIterator.processNext(Unknown Source)
>       at org.apache.spark.sql.execution.BufferedRowIterator.hasNext(BufferedRowIterator.java:43)
>       at org.apache.spark.sql.execution.WholeStageCodegenExec$$anonfun$8$$anon$1.hasNext(WholeStageCodegenExec.scala:370)
>       at org.apache.spark.sql.execution.debug.package$DebugExec$$anonfun$3$$anon$1.hasNext(package.scala:146)
>       at org.apache.spark.util.Utils$.getIteratorSize(Utils.scala:1763)
>       at org.apache.spark.rdd.RDD$$anonfun$count$1.apply(RDD.scala:1134)
>       at org.apache.spark.rdd.RDD$$anonfun$count$1.apply(RDD.scala:1134)
>       at org.apache.spark.SparkContext$$anonfun$runJob$5.apply(SparkContext.scala:1899)
>       at org.apache.spark.SparkContext$$anonfun$runJob$5.apply(SparkContext.scala:1899)
>       at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:70)
>       at org.apache.spark.scheduler.Task.run(Task.scala:86)
>       at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:274)
>       at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142)
>       at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617)
>       at java.lang.Thread.run(Thread.java:745)
> {code}


