[ 
https://issues.apache.org/jira/browse/SPARK-27234?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel
 ]

Mark Hamilton updated SPARK-27234:
----------------------------------
    Description: 
Here's a repro:
{code:python}
from pyspark.sql.functions import col, udf

fooUDF = udf(lambda p: "foo")

spark \
    .readStream \
    .format("rate") \
    .load() \
    .withColumn("foo", fooUDF(col("value"))) \
    .writeStream \
    .format("console") \
    .trigger(continuous="1 second") \
    .start()
{code}
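For contrast, the same pipeline appears to run under the continuous trigger when the derived column comes from a built-in expression instead of a Python UDF, which points at Python UDF execution specifically. A minimal sketch, with {{lit}} standing in for any expression continuous mode supports:

{code:python}
from pyspark.sql.functions import lit

# Identical pipeline, but the "foo" column is a built-in expression.
# Continuous processing in 2.4 only handles map-like operations
# (projections/filters) over supported expressions.
spark \
    .readStream \
    .format("rate") \
    .load() \
    .withColumn("foo", lit("foo")) \
    .writeStream \
    .format("console") \
    .trigger(continuous="1 second") \
    .start()
{code}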
Error message from the UDF version (this is all that Azure Databricks prints):
{code:java}
at org.apache.spark.sql.execution.streaming.continuous.WriteToContinuousDataSourceExec.doExecute(WriteToContinuousDataSourceExec.scala:62)
at org.apache.spark.sql.execution.SparkPlan$$anonfun$execute$1.apply(SparkPlan.scala:143)
at org.apache.spark.sql.execution.SparkPlan$$anonfun$execute$1.apply(SparkPlan.scala:131)
at org.apache.spark.sql.execution.SparkPlan$$anonfun$executeQuery$5.apply(SparkPlan.scala:183)
at org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:151)
at org.apache.spark.sql.execution.SparkPlan.executeQuery(SparkPlan.scala:180)
at org.apache.spark.sql.execution.SparkPlan.execute(SparkPlan.scala:131)
at org.apache.spark.sql.execution.QueryExecution.toRdd$lzycompute(QueryExecution.scala:114)
at org.apache.spark.sql.execution.QueryExecution.toRdd(QueryExecution.scala:114)
at org.apache.spark.sql.execution.streaming.continuous.ContinuousExecution$$anonfun$runContinuous$3$$anonfun$apply$1.apply(ContinuousExecution.scala:273)
at org.apache.spark.sql.execution.streaming.continuous.ContinuousExecution$$anonfun$runContinuous$3$$anonfun$apply$1.apply(ContinuousExecution.scala:269)
at org.apache.spark.sql.execution.SQLExecution$$anonfun$withCustomExecutionEnv$1.apply(SQLExecution.scala:92)
at org.apache.spark.sql.execution.SQLExecution$.withSQLConfPropagated(SQLExecution.scala:233)
at org.apache.spark.sql.execution.SQLExecution$.withCustomExecutionEnv(SQLExecution.scala:86)
at org.apache.spark.sql.execution.SQLExecution$.withNewExecutionId(SQLExecution.scala:163)
at org.apache.spark.sql.execution.streaming.continuous.ContinuousExecution$$anonfun$runContinuous$3.apply(ContinuousExecution.scala:269)
at org.apache.spark.sql.execution.streaming.continuous.ContinuousExecution$$anonfun$runContinuous$3.apply(ContinuousExecution.scala:269)
at org.apache.spark.sql.execution.streaming.ProgressReporter$class.reportTimeTaken(ProgressReporter.scala:251)
at org.apache.spark.sql.execution.streaming.StreamExecution.reportTimeTaken(StreamExecution.scala:61)
at org.apache.spark.sql.execution.streaming.continuous.ContinuousExecution.runContinuous(ContinuousExecution.scala:267)
at org.apache.spark.sql.execution.streaming.continuous.ContinuousExecution.runActivatedStream(ContinuousExecution.scala:93)
at org.apache.spark.sql.execution.streaming.StreamExecution.org$apache$spark$sql$execution$streaming$StreamExecution$$runStream(StreamExecution.scala:295)
at org.apache.spark.sql.execution.streaming.StreamExecution$$anon$1.run(StreamExecution.scala:205)
Caused by: org.apache.spark.SparkException: Job aborted due to stage failure: Task 4 in stage 0.0 failed 4 times, most recent failure: Lost task 4.3 in stage 0.0 (TID 9, 10.139.64.4, executor 0): org.apache.spark.sql.execution.streaming.continuous.ContinuousTaskRetryException: Continuous execution does not support task retry
at org.apache.spark.sql.execution.streaming.continuous.ContinuousDataSourceRDD.compute(ContinuousDataSourceRDD.scala:68)
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:340)
at org.apache.spark.rdd.RDD.iterator(RDD.scala:304)
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:60)
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:340)
at org.apache.spark.rdd.RDD.iterator(RDD.scala:304)
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:60)
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:340)
at org.apache.spark.rdd.RDD.iterator(RDD.scala:304)
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:60)
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:340)
at org.apache.spark.rdd.RDD.iterator(RDD.scala:304)
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:60)
at org.apache.spark.sql.execution.streaming.continuous.ContinuousWriteRDD$$anonfun$compute$1.apply$mcV$sp(ContinuousWriteRDD.scala:52)
at org.apache.spark.sql.execution.streaming.continuous.ContinuousWriteRDD$$anonfun$compute$1.apply(ContinuousWriteRDD.scala:51)
at org.apache.spark.sql.execution.streaming.continuous.ContinuousWriteRDD$$anonfun$compute$1.apply(ContinuousWriteRDD.scala:51)
at org.apache.spark.util.Utils$.tryWithSafeFinallyAndFailureCallbacks(Utils.scala:1466)
at org.apache.spark.sql.execution.streaming.continuous.ContinuousWriteRDD.compute(ContinuousWriteRDD.scala:76)
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:340)
at org.apache.spark.rdd.RDD.iterator(RDD.scala:304)
at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:90)
at org.apache.spark.scheduler.Task.doRunTask(Task.scala:139)
at org.apache.spark.scheduler.Task.run(Task.scala:112)
at org.apache.spark.executor.Executor$TaskRunner$$anonfun$13.apply(Executor.scala:496)
at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:1432)
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:502)
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
at java.lang.Thread.run(Thread.java:748)
Driver stacktrace:
at org.apache.spark.scheduler.DAGScheduler.org$apache$spark$scheduler$DAGScheduler$$failJobAndIndependentStages(DAGScheduler.scala:2098)
at org.apache.spark.scheduler.DAGScheduler$$anonfun$abortStage$1.apply(DAGScheduler.scala:2086)
at org.apache.spark.scheduler.DAGScheduler$$anonfun$abortStage$1.apply(DAGScheduler.scala:2085)
at scala.collection.mutable.ResizableArray$class.foreach(ResizableArray.scala:59)
at scala.collection.mutable.ArrayBuffer.foreach(ArrayBuffer.scala:48)
at org.apache.spark.scheduler.DAGScheduler.abortStage(DAGScheduler.scala:2085)
at org.apache.spark.scheduler.DAGScheduler$$anonfun$handleTaskSetFailed$1.apply(DAGScheduler.scala:1076)
at org.apache.spark.scheduler.DAGScheduler$$anonfun$handleTaskSetFailed$1.apply(DAGScheduler.scala:1076)
at scala.Option.foreach(Option.scala:257)
at org.apache.spark.scheduler.DAGScheduler.handleTaskSetFailed(DAGScheduler.scala:1076)
at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.doOnReceive(DAGScheduler.scala:2317)
at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:2265)
at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:2253)
at org.apache.spark.util.EventLoop$$anon$1.run(EventLoop.scala:49)
at org.apache.spark.scheduler.DAGScheduler.runJob(DAGScheduler.scala:873)
at org.apache.spark.SparkContext.runJob(SparkContext.scala:2251)
at org.apache.spark.SparkContext.runJob(SparkContext.scala:2273)
at org.apache.spark.SparkContext.runJob(SparkContext.scala:2292)
at org.apache.spark.SparkContext.runJob(SparkContext.scala:2317)
at org.apache.spark.rdd.RDD$$anonfun$collect$1.apply(RDD.scala:961)
at org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:151)
at org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:112)
at org.apache.spark.rdd.RDD.withScope(RDD.scala:379)
at org.apache.spark.rdd.RDD.collect(RDD.scala:960)
at org.apache.spark.sql.execution.streaming.continuous.WriteToContinuousDataSourceExec.doExecute(WriteToContinuousDataSourceExec.scala:53)
... 22 more
Caused by: org.apache.spark.sql.execution.streaming.continuous.ContinuousTaskRetryException: Continuous execution does not support task retry
at org.apache.spark.sql.execution.streaming.continuous.ContinuousDataSourceRDD.compute(ContinuousDataSourceRDD.scala:68)
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:340)
at org.apache.spark.rdd.RDD.iterator(RDD.scala:304)
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:60)
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:340)
at org.apache.spark.rdd.RDD.iterator(RDD.scala:304)
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:60)
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:340)
at org.apache.spark.rdd.RDD.iterator(RDD.scala:304)
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:60)
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:340)
at org.apache.spark.rdd.RDD.iterator(RDD.scala:304)
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:60)
at org.apache.spark.sql.execution.streaming.continuous.ContinuousWriteRDD$$anonfun$compute$1.apply$mcV$sp(ContinuousWriteRDD.scala:52)
at org.apache.spark.sql.execution.streaming.continuous.ContinuousWriteRDD$$anonfun$compute$1.apply(ContinuousWriteRDD.scala:51)
at org.apache.spark.sql.execution.streaming.continuous.ContinuousWriteRDD$$anonfun$compute$1.apply(ContinuousWriteRDD.scala:51)
at org.apache.spark.util.Utils$.tryWithSafeFinallyAndFailureCallbacks(Utils.scala:1466)
at org.apache.spark.sql.execution.streaming.continuous.ContinuousWriteRDD.compute(ContinuousWriteRDD.scala:76)
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:340)
at org.apache.spark.rdd.RDD.iterator(RDD.scala:304)
at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:90)
at org.apache.spark.scheduler.Task.doRunTask(Task.scala:139)
at org.apache.spark.scheduler.Task.run(Task.scala:112)
at org.apache.spark.executor.Executor$TaskRunner$$anonfun$13.apply(Executor.scala:496)
at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:1432)
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:502)
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
at java.lang.Thread.run(Thread.java:748)
{code}
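The root cause is buried mid-trace: it looks like the task evaluating the Python UDF fails, the scheduler retries it, and each retry dies with {{ContinuousTaskRetryException}} because continuous execution does not allow task retries, so the whole query aborts. Until continuous processing supports Python UDFs, falling back to a micro-batch trigger is a plausible workaround. A sketch, assuming near-real-time micro-batches are acceptable for the use case:

{code:python}
from pyspark.sql.functions import col, udf

fooUDF = udf(lambda p: "foo")

# Same query under a micro-batch trigger, which does support Python
# UDFs; this trades continuous-mode latency for a supported path.
spark \
    .readStream \
    .format("rate") \
    .load() \
    .withColumn("foo", fooUDF(col("value"))) \
    .writeStream \
    .format("console") \
    .trigger(processingTime="1 second") \
    .start()
{code}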
 

  was:
Here's a repro:

{code:python}
from pyspark.sql.functions import col, udf

get_p_eng = udf(lambda p: "foo")

df = spark \
    .readStream \
    .format("rate") \
    .load() \
    .withColumn("p_eng", get_p_eng(col("value"))) \
    .makeReply("p_eng")

df.isStreaming  # Returns True for DataFrames that have streaming sources

df.printSchema()

# Start the query with a continuous trigger, writing to the console
df.writeStream.format("console").trigger(continuous="1 second").start()
{code}


> Continuous Streaming does not support python UDFs
> -------------------------------------------------
>
>                 Key: SPARK-27234
>                 URL: https://issues.apache.org/jira/browse/SPARK-27234
>             Project: Spark
>          Issue Type: Bug
>          Components: Structured Streaming
>    Affects Versions: 2.4.0
>         Environment: Azure Databricks 5.1
>            Reporter: Mark Hamilton
>            Priority: Major
>



--
This message was sent by Atlassian JIRA
(v7.6.3#76005)
