[ https://issues.apache.org/jira/browse/SPARK-30585?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=17019425#comment-17019425 ]

Rashmi commented on SPARK-30585:
--------------------------------

- cte.sql
- datetime.sql
- describe-table-column.sql
03:48:33.567 WARN org.apache.spark.sql.execution.command.DropTableCommand: org.apache.spark.sql.AnalysisException: Table or view not found: default.t; line 1 pos 14
org.apache.spark.sql.AnalysisException: Table or view not found: default.t; line 1 pos 14
 at 
org.apache.spark.sql.catalyst.analysis.package$AnalysisErrorAt.failAnalysis(package.scala:47)
 at 
org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveRelations$.org$apache$spark$sql$catalyst$analysis$Analyzer$ResolveRelations$$lookupTableFromCatalog(Analyzer.scala:733)
 at 
org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveRelations$.resolveRelation(Analyzer.scala:685)
 at 
org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveRelations$$anonfun$apply$8.applyOrElse(Analyzer.scala:715)
 at 
org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveRelations$$anonfun$apply$8.applyOrElse(Analyzer.scala:708)
 at 
org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper$$anonfun$resolveOperatorsUp$1$$anonfun$apply$1.apply(AnalysisHelper.scala:90)
 at 
org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper$$anonfun$resolveOperatorsUp$1$$anonfun$apply$1.apply(AnalysisHelper.scala:90)
 at 
org.apache.spark.sql.catalyst.trees.CurrentOrigin$.withOrigin(TreeNode.scala:70)
 at 
org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper$$anonfun$resolveOperatorsUp$1.apply(AnalysisHelper.scala:89)
 at 
org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper$$anonfun$resolveOperatorsUp$1.apply(AnalysisHelper.scala:86)
 at 
org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper$.allowInvokingTransformsInAnalyzer(AnalysisHelper.scala:194)
 at 
org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper$class.resolveOperatorsUp(AnalysisHelper.scala:86)
 at 
org.apache.spark.sql.catalyst.plans.logical.LogicalPlan.resolveOperatorsUp(LogicalPlan.scala:29)
 at 
org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper$$anonfun$resolveOperatorsUp$1$$anonfun$1.apply(AnalysisHelper.scala:87)
 at 
org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper$$anonfun$resolveOperatorsUp$1$$anonfun$1.apply(AnalysisHelper.scala:87)
 at 
org.apache.spark.sql.catalyst.trees.TreeNode$$anonfun$4.apply(TreeNode.scala:326)
 at 
org.apache.spark.sql.catalyst.trees.TreeNode.mapProductIterator(TreeNode.scala:187)
 at org.apache.spark.sql.catalyst.trees.TreeNode.mapChildren(TreeNode.scala:324)
 at 
org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper$$anonfun$resolveOperatorsUp$1.apply(AnalysisHelper.scala:87)
 at 
org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper$$anonfun$resolveOperatorsUp$1.apply(AnalysisHelper.scala:86)
 at 
org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper$.allowInvokingTransformsInAnalyzer(AnalysisHelper.scala:194)
 at 
org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper$class.resolveOperatorsUp(AnalysisHelper.scala:86)
 at 
org.apache.spark.sql.catalyst.plans.logical.LogicalPlan.resolveOperatorsUp(LogicalPlan.scala:29)
 at 
org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveRelations$.apply(Analyzer.scala:708)
 at 
org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveRelations$.apply(Analyzer.scala:654)
 at 
org.apache.spark.sql.catalyst.rules.RuleExecutor$$anonfun$execute$1$$anonfun$apply$1.apply(RuleExecutor.scala:87)
 at 
org.apache.spark.sql.catalyst.rules.RuleExecutor$$anonfun$execute$1$$anonfun$apply$1.apply(RuleExecutor.scala:84)
 at 
scala.collection.LinearSeqOptimized$class.foldLeft(LinearSeqOptimized.scala:124)
 at scala.collection.immutable.List.foldLeft(List.scala:84)
 at 
org.apache.spark.sql.catalyst.rules.RuleExecutor$$anonfun$execute$1.apply(RuleExecutor.scala:84)
 at 
org.apache.spark.sql.catalyst.rules.RuleExecutor$$anonfun$execute$1.apply(RuleExecutor.scala:76)
 at scala.collection.immutable.List.foreach(List.scala:392)
 at 
org.apache.spark.sql.catalyst.rules.RuleExecutor.execute(RuleExecutor.scala:76)
 at 
org.apache.spark.sql.catalyst.analysis.Analyzer.org$apache$spark$sql$catalyst$analysis$Analyzer$$executeSameContext(Analyzer.scala:127)
 at 
org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveRelations$$anonfun$35.apply(Analyzer.scala:699)
 at 
org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveRelations$$anonfun$35.apply(Analyzer.scala:692)
 at 
org.apache.spark.sql.catalyst.analysis.AnalysisContext$.withAnalysisContext(Analyzer.scala:87)
 at 
org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveRelations$.resolveRelation(Analyzer.scala:692)
 at 
org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveRelations$.resolveRelation(Analyzer.scala:703)
 at 
org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveRelations$$anonfun$apply$8.applyOrElse(Analyzer.scala:715)
 at 
org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveRelations$$anonfun$apply$8.applyOrElse(Analyzer.scala:708)
 at 
org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper$$anonfun$resolveOperatorsUp$1$$anonfun$apply$1.apply(AnalysisHelper.scala:90)
 at 
org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper$$anonfun$resolveOperatorsUp$1$$anonfun$apply$1.apply(AnalysisHelper.scala:90)
 at 
org.apache.spark.sql.catalyst.trees.CurrentOrigin$.withOrigin(TreeNode.scala:70)
 at 
org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper$$anonfun$resolveOperatorsUp$1.apply(AnalysisHelper.scala:89)
 at 
org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper$$anonfun$resolveOperatorsUp$1.apply(AnalysisHelper.scala:86)
 at 
org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper$.allowInvokingTransformsInAnalyzer(AnalysisHelper.scala:194)
 at 
org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper$class.resolveOperatorsUp(AnalysisHelper.scala:86)
 at 
org.apache.spark.sql.catalyst.plans.logical.LogicalPlan.resolveOperatorsUp(LogicalPlan.scala:29)
 at 
org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveRelations$.apply(Analyzer.scala:708)
 at 
org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveRelations$.apply(Analyzer.scala:654)
 at 
org.apache.spark.sql.catalyst.rules.RuleExecutor$$anonfun$execute$1$$anonfun$apply$1.apply(RuleExecutor.scala:87)
 at 
org.apache.spark.sql.catalyst.rules.RuleExecutor$$anonfun$execute$1$$anonfun$apply$1.apply(RuleExecutor.scala:84)
 at 
scala.collection.LinearSeqOptimized$class.foldLeft(LinearSeqOptimized.scala:124)
 at scala.collection.immutable.List.foldLeft(List.scala:84)
 at 
org.apache.spark.sql.catalyst.rules.RuleExecutor$$anonfun$execute$1.apply(RuleExecutor.scala:84)
 at 
org.apache.spark.sql.catalyst.rules.RuleExecutor$$anonfun$execute$1.apply(RuleExecutor.scala:76)
 at scala.collection.immutable.List.foreach(List.scala:392)
 at 
org.apache.spark.sql.catalyst.rules.RuleExecutor.execute(RuleExecutor.scala:76)
 at 
org.apache.spark.sql.catalyst.analysis.Analyzer.org$apache$spark$sql$catalyst$analysis$Analyzer$$executeSameContext(Analyzer.scala:127)
 at org.apache.spark.sql.catalyst.analysis.Analyzer.execute(Analyzer.scala:121)
 at 
org.apache.spark.sql.catalyst.analysis.Analyzer$$anonfun$executeAndCheck$1.apply(Analyzer.scala:106)
 at 
org.apache.spark.sql.catalyst.analysis.Analyzer$$anonfun$executeAndCheck$1.apply(Analyzer.scala:105)
 at 
org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper$.markInAnalyzer(AnalysisHelper.scala:201)
 at 
org.apache.spark.sql.catalyst.analysis.Analyzer.executeAndCheck(Analyzer.scala:105)
 at 
org.apache.spark.sql.execution.QueryExecution.analyzed$lzycompute(QueryExecution.scala:57)
 at 
org.apache.spark.sql.execution.QueryExecution.analyzed(QueryExecution.scala:55)
 at 
org.apache.spark.sql.execution.QueryExecution.assertAnalyzed(QueryExecution.scala:47)
 at org.apache.spark.sql.Dataset$.ofRows(Dataset.scala:79)
 at org.apache.spark.sql.SparkSession.table(SparkSession.scala:628)
 at org.apache.spark.sql.execution.command.DropTableCommand.run(ddl.scala:211)
 at 
org.apache.spark.sql.execution.command.ExecutedCommandExec.sideEffectResult$lzycompute(commands.scala:70)
 at 
org.apache.spark.sql.execution.command.ExecutedCommandExec.sideEffectResult(commands.scala:68)
 at 
org.apache.spark.sql.execution.command.ExecutedCommandExec.executeCollect(commands.scala:79)
 at org.apache.spark.sql.Dataset$$anonfun$6.apply(Dataset.scala:195)
 at org.apache.spark.sql.Dataset$$anonfun$6.apply(Dataset.scala:195)
 at org.apache.spark.sql.Dataset$$anonfun$53.apply(Dataset.scala:3365)
 at 
org.apache.spark.sql.execution.SQLExecution$$anonfun$withNewExecutionId$1.apply(SQLExecution.scala:78)
 at 
org.apache.spark.sql.execution.SQLExecution$.withSQLConfPropagated(SQLExecution.scala:125)
 at 
org.apache.spark.sql.execution.SQLExecution$.withNewExecutionId(SQLExecution.scala:73)
 at org.apache.spark.sql.Dataset.withAction(Dataset.scala:3364)
 at org.apache.spark.sql.Dataset.<init>(Dataset.scala:195)
 at org.apache.spark.sql.Dataset$.ofRows(Dataset.scala:80)
 at org.apache.spark.sql.SparkSession.sql(SparkSession.scala:642)
 at 
org.apache.spark.sql.SQLQueryTestSuite.org$apache$spark$sql$SQLQueryTestSuite$$getNormalizedResult(SQLQueryTestSuite.scala:266)
 at 
org.apache.spark.sql.SQLQueryTestSuite$$anonfun$11.apply(SQLQueryTestSuite.scala:196)
 at 
org.apache.spark.sql.SQLQueryTestSuite$$anonfun$11.apply(SQLQueryTestSuite.scala:195)
 at 
scala.collection.TraversableLike$$anonfun$map$1.apply(TraversableLike.scala:234)
 at 
scala.collection.TraversableLike$$anonfun$map$1.apply(TraversableLike.scala:234)
 at 
scala.collection.IndexedSeqOptimized$class.foreach(IndexedSeqOptimized.scala:33)
 at scala.collection.mutable.WrappedArray.foreach(WrappedArray.scala:35)
 at scala.collection.TraversableLike$class.map(TraversableLike.scala:234)
 at scala.collection.AbstractTraversable.map(Traversable.scala:104)
 at 
org.apache.spark.sql.SQLQueryTestSuite.org$apache$spark$sql$SQLQueryTestSuite$$runQueries(SQLQueryTestSuite.scala:195)
 at 
org.apache.spark.sql.SQLQueryTestSuite.org$apache$spark$sql$SQLQueryTestSuite$$runTest(SQLQueryTestSuite.scala:162)
 at 
org.apache.spark.sql.SQLQueryTestSuite$$anonfun$org$apache$spark$sql$SQLQueryTestSuite$$createScalaTestCase$2.apply$mcV$sp(SQLQueryTestSuite.scala:134)
 at 
org.apache.spark.sql.SQLQueryTestSuite$$anonfun$org$apache$spark$sql$SQLQueryTestSuite$$createScalaTestCase$2.apply(SQLQueryTestSuite.scala:134)
 at 
org.apache.spark.sql.SQLQueryTestSuite$$anonfun$org$apache$spark$sql$SQLQueryTestSuite$$createScalaTestCase$2.apply(SQLQueryTestSuite.scala:134)
 at org.scalatest.OutcomeOf$class.outcomeOf(OutcomeOf.scala:85)
 at org.scalatest.OutcomeOf$.outcomeOf(OutcomeOf.scala:104)
 at org.scalatest.Transformer.apply(Transformer.scala:22)
 at org.scalatest.Transformer.apply(Transformer.scala:20)
 at org.scalatest.FunSuiteLike$$anon$1.apply(FunSuiteLike.scala:186)
 at org.apache.spark.SparkFunSuite.withFixture(SparkFunSuite.scala:103)
 at org.scalatest.FunSuiteLike$class.invokeWithFixture$1(FunSuiteLike.scala:183)
 at org.scalatest.FunSuiteLike$$anonfun$runTest$1.apply(FunSuiteLike.scala:196)
 at org.scalatest.FunSuiteLike$$anonfun$runTest$1.apply(FunSuiteLike.scala:196)
 at org.scalatest.SuperEngine.runTestImpl(Engine.scala:289)
 at org.scalatest.FunSuiteLike$class.runTest(FunSuiteLike.scala:196)
 at 
org.apache.spark.sql.SQLQueryTestSuite.org$scalatest$BeforeAndAfterEach$$super$runTest(SQLQueryTestSuite.scala:83)
 at org.scalatest.BeforeAndAfterEach$class.runTest(BeforeAndAfterEach.scala:221)
 at org.apache.spark.sql.SQLQueryTestSuite.runTest(SQLQueryTestSuite.scala:83)
 at org.scalatest.FunSuiteLike$$anonfun$runTests$1.apply(FunSuiteLike.scala:229)
 at org.scalatest.FunSuiteLike$$anonfun$runTests$1.apply(FunSuiteLike.scala:229)
 at 
org.scalatest.SuperEngine$$anonfun$traverseSubNodes$1$1.apply(Engine.scala:396)
 at 
org.scalatest.SuperEngine$$anonfun$traverseSubNodes$1$1.apply(Engine.scala:384)
 at scala.collection.immutable.List.foreach(List.scala:392)
 at org.scalatest.SuperEngine.traverseSubNodes$1(Engine.scala:384)
 at 
org.scalatest.SuperEngine.org$scalatest$SuperEngine$$runTestsInBranch(Engine.scala:379)
 at org.scalatest.SuperEngine.runTestsImpl(Engine.scala:461)
 at org.scalatest.FunSuiteLike$class.runTests(FunSuiteLike.scala:229)
 at org.scalatest.FunSuite.runTests(FunSuite.scala:1560)
 at org.scalatest.Suite$class.run(Suite.scala:1147)
 at 
org.scalatest.FunSuite.org$scalatest$FunSuiteLike$$super$run(FunSuite.scala:1560)
 at org.scalatest.FunSuiteLike$$anonfun$run$1.apply(FunSuiteLike.scala:233)
 at org.scalatest.FunSuiteLike$$anonfun$run$1.apply(FunSuiteLike.scala:233)
 at org.scalatest.SuperEngine.runImpl(Engine.scala:521)
 at org.scalatest.FunSuiteLike$class.run(FunSuiteLike.scala:233)
 at 
org.apache.spark.SparkFunSuite.org$scalatest$BeforeAndAfterAll$$super$run(SparkFunSuite.scala:52)
 at 
org.scalatest.BeforeAndAfterAll$class.liftedTree1$1(BeforeAndAfterAll.scala:213)
 at org.scalatest.BeforeAndAfterAll$class.run(BeforeAndAfterAll.scala:210)
 at org.apache.spark.SparkFunSuite.run(SparkFunSuite.scala:52)
 at org.scalatest.Suite$class.callExecuteOnSuite$1(Suite.scala:1210)
 at org.scalatest.Suite$$anonfun$runNestedSuites$1.apply(Suite.scala:1257)
 at org.scalatest.Suite$$anonfun$runNestedSuites$1.apply(Suite.scala:1255)
 at 
scala.collection.IndexedSeqOptimized$class.foreach(IndexedSeqOptimized.scala:33)
 at scala.collection.mutable.ArrayOps$ofRef.foreach(ArrayOps.scala:186)
 at org.scalatest.Suite$class.runNestedSuites(Suite.scala:1255)
 at org.scalatest.tools.DiscoverySuite.runNestedSuites(DiscoverySuite.scala:30)
 at org.scalatest.Suite$class.run(Suite.scala:1144)
 at org.scalatest.tools.DiscoverySuite.run(DiscoverySuite.scala:30)
 at org.scalatest.tools.SuiteRunner.run(SuiteRunner.scala:45)
 at 
org.scalatest.tools.Runner$$anonfun$doRunRunRunDaDoRunRun$1.apply(Runner.scala:1340)
 at 
org.scalatest.tools.Runner$$anonfun$doRunRunRunDaDoRunRun$1.apply(Runner.scala:1334)
 at scala.collection.immutable.List.foreach(List.scala:392)
 at org.scalatest.tools.Runner$.doRunRunRunDaDoRunRun(Runner.scala:1334)
 at 
org.scalatest.tools.Runner$$anonfun$runOptionallyWithPassFailReporter$2.apply(Runner.scala:1011)
 at 
org.scalatest.tools.Runner$$anonfun$runOptionallyWithPassFailReporter$2.apply(Runner.scala:1010)
 at 
org.scalatest.tools.Runner$.withClassLoaderAndDispatchReporter(Runner.scala:1500)
 at 
org.scalatest.tools.Runner$.runOptionallyWithPassFailReporter(Runner.scala:1010)
 at org.scalatest.tools.Runner$.main(Runner.scala:827)
 at org.scalatest.tools.Runner.main(Runner.scala)
Caused by: org.apache.spark.sql.catalyst.analysis.NoSuchTableException: Table or view 't' not found in database 'default';
 at 
org.apache.spark.sql.catalyst.catalog.ExternalCatalog$class.requireTableExists(ExternalCatalog.scala:48)
 at 
org.apache.spark.sql.catalyst.catalog.InMemoryCatalog.requireTableExists(InMemoryCatalog.scala:45)
 at 
org.apache.spark.sql.catalyst.catalog.InMemoryCatalog.getTable(InMemoryCatalog.scala:326)
 at 
org.apache.spark.sql.catalyst.catalog.ExternalCatalogWithListener.getTable(ExternalCatalogWithListener.scala:138)
 at 
org.apache.spark.sql.catalyst.catalog.SessionCatalog.lookupRelation(SessionCatalog.scala:701)
 at 
org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveRelations$.org$apache$spark$sql$catalyst$analysis$Analyzer$ResolveRelations$$lookupTableFromCatalog(Analyzer.scala:730)
 ... 150 more
- describe.sql
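
The "Table or view not found: default.t" warning above appears to be logged while DropTableCommand re-resolves the table (the SparkSession.table call in the trace) and finds it already gone, so it looks like noise from the cleanup DROP statements in those .sql files rather than the real failure. A minimal sketch of the same lookup outside the test harness (illustrative only, assuming a plain local SparkSession with the default in-memory catalog rather than the real SQLQueryTestSuite setup):

import org.apache.spark.sql.{AnalysisException, SparkSession}

object DropMissingTableSketch {
  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder()
      .master("local[2]")
      .appName("drop-missing-table-sketch")
      .getOrCreate()

    // Without IF EXISTS, dropping a table that was never created (or is
    // already dropped) fails with an AnalysisException like the one above.
    try {
      spark.sql("DROP TABLE t")
    } catch {
      case e: AnalysisException => println(s"expected: ${e.getMessage}")
    }

    // The guarded form simply no-ops when the table is missing.
    spark.sql("DROP TABLE IF EXISTS t")

    spark.stop()
  }
}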

> scalatest fails for Apache Spark SQL project
> --------------------------------------------
>
>                 Key: SPARK-30585
>                 URL: https://issues.apache.org/jira/browse/SPARK-30585
>             Project: Spark
>          Issue Type: Bug
>          Components: Build
>    Affects Versions: 2.4.0
>            Reporter: Rashmi
>            Priority: Blocker
>
> Error logs:
> 23:36:49.039 WARN org.apache.spark.scheduler.TaskSetManager: Lost task 0.0 in 
> stage 3.0 (TID 6, localhost, executor driver): TaskKilled (Stage cancelled)
> 23:36:49.039 WARN org.apache.spark.scheduler.TaskSetManager: Lost task 1.0 in 
> stage 3.0 (TID 7, localhost, executor driver): TaskKilled (Stage cancelled)
> 23:36:51.354 WARN org.apache.spark.sql.execution.streaming.ProcessingTimeExecutor: Current batch is falling behind. The trigger interval is 100 milliseconds, but spent 1854 milliseconds
> 23:36:51.381 WARN org.apache.spark.sql.execution.streaming.continuous.ContinuousQueuedDataReader$DataReaderThread: data reader thread failed
> org.apache.spark.SparkException: Exception thrown in awaitResult:
>  at org.apache.spark.util.ThreadUtils$.awaitResult(ThreadUtils.scala:226)
>  at org.apache.spark.rpc.RpcTimeout.awaitResult(RpcTimeout.scala:75)
>  at org.apache.spark.rpc.RpcEndpointRef.askSync(RpcEndpointRef.scala:92)
>  at org.apache.spark.rpc.RpcEndpointRef.askSync(RpcEndpointRef.scala:76)
>  at 
> org.apache.spark.sql.execution.streaming.sources.ContinuousMemoryStreamInputPartitionReader.getRecord(ContinuousMemoryStream.scala:195)
>  at 
> org.apache.spark.sql.execution.streaming.sources.ContinuousMemoryStreamInputPartitionReader.next(ContinuousMemoryStream.scala:181)
>  at 
> org.apache.spark.sql.execution.streaming.continuous.ContinuousQueuedDataReader$DataReaderThread.run(ContinuousQueuedDataReader.scala:143)
> Caused by: org.apache.spark.SparkException: Could not find ContinuousMemoryStreamRecordEndpoint-f7d4460c-9f4e-47ee-a846-258b34964852-9.
>  at org.apache.spark.rpc.netty.Dispatcher.postMessage(Dispatcher.scala:160)
>  at 
> org.apache.spark.rpc.netty.Dispatcher.postLocalMessage(Dispatcher.scala:135)
>  at org.apache.spark.rpc.netty.NettyRpcEnv.ask(NettyRpcEnv.scala:229)
>  at org.apache.spark.rpc.netty.NettyRpcEndpointRef.ask(NettyRpcEnv.scala:523)
>  at org.apache.spark.rpc.RpcEndpointRef.askSync(RpcEndpointRef.scala:91)
>  ... 4 more
> 23:36:51.389 WARN org.apache.spark.scheduler.TaskSetManager: Lost task 1.0 in 
> stage 4.0 (TID 9, localhost, executor driver): TaskKilled (Stage cancelled)
> 23:36:51.390 WARN org.apache.spark.scheduler.TaskSetManager: Lost task 0.0 in 
> stage 4.0 (TID 8, localhost, executor driver): TaskKilled (Stage cancelled)
> - flatMap
> 23:36:51.754 WARN org.apache.spark.scheduler.TaskSetManager: Lost task 1.0 in 
> stage 5.0 (TID 11, localhost, executor driver): TaskKilled (Stage cancelled)
> 23:36:51.754 WARN org.apache.spark.scheduler.TaskSetManager: Lost task 0.0 in 
> stage 5.0 (TID 10, localhost, executor driver): TaskKilled (Stage cancelled)
> 23:36:52.248 WARN org.apache.spark.scheduler.TaskSetManager: Lost task 1.0 in 
> stage 6.0 (TID 13, localhost, executor driver): TaskKilled (Stage cancelled)
> 23:36:52.249 WARN org.apache.spark.scheduler.TaskSetManager: Lost task 0.0 in 
> stage 6.0 (TID 12, localhost, executor driver): TaskKilled (Stage cancelled)
> - filter
> 23:36:52.611 WARN org.apache.spark.scheduler.TaskSetManager: Lost task 0.0 in 
> stage 7.0 (TID 14, localhost, executor driver): TaskKilled (Stage cancelled)
> 23:36:52.611 WARN org.apache.spark.scheduler.TaskSetManager: Lost task 1.0 in 
> stage 7.0 (TID 15, localhost, executor driver): TaskKilled (Stage cancelled)
> - deduplicate
> - timestamp
> 23:36:53.015 WARN org.apache.spark.scheduler.TaskSetManager: Lost task 0.0 in 
> stage 8.0 (TID 16, localhost, executor driver): TaskKilled (Stage cancelled)
> 23:36:53.015 WARN org.apache.spark.scheduler.TaskSetManager: Lost task 1.0 in 
> stage 8.0 (TID 17, localhost, executor driver): TaskKilled (Stage cancelled)
> - subquery alias
> 23:36:53.572 WARN org.apache.spark.scheduler.TaskSetManager: Lost task 1.0 in 
> stage 9.0 (TID 19, localhost, executor driver): TaskKilled (Stage cancelled)
> 23:36:53.572 WARN org.apache.spark.scheduler.TaskSetManager: Lost task 0.0 in 
> stage 9.0 (TID 18, localhost, executor driver): TaskKilled (Stage cancelled)
> 23:36:53.953 WARN org.apache.spark.scheduler.TaskSetManager: Lost task 1.0 in 
> stage 10.0 (TID 21, localhost, executor driver): TaskKilled (Stage cancelled)
> 23:36:53.953 WARN org.apache.spark.scheduler.TaskSetManager: Lost task 0.0 in 
> stage 10.0 (TID 20, localhost, executor driver): TaskKilled (Stage cancelled)
> 23:36:54.552 WARN org.apache.spark.scheduler.TaskSetManager: Lost task 1.0 in 
> stage 11.0 (TID 23, localhost, executor driver): TaskKilled (Stage cancelled)
> 23:36:54.552 WARN org.apache.spark.scheduler.TaskSetManager: Lost task 0.0 in 
> stage 11.0 (TID 22, localhost, executor driver): TaskKilled (Stage cancelled)
> - repeatedly restart
> 23:36:54.591 WARN org.apache.spark.scheduler.TaskSetManager: Lost task 1.0 in stage 12.0 (TID 25, localhost, executor driver): TaskKilled (killed via SparkContext.killTaskAttempt)
> 23:36:54.594 ERROR org.apache.spark.util.Utils: Aborting task
> org.apache.spark.sql.execution.streaming.continuous.ContinuousTaskRetryException: Continuous execution does not support task retry
>  at 
> org.apache.spark.sql.execution.streaming.continuous.ContinuousDataSourceRDD.compute(ContinuousDataSourceRDD.scala:68)
>  at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324)
>  at org.apache.spark.rdd.RDD.iterator(RDD.scala:288)
>  at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
>  at 
> org.apache.spark.sql.execution.streaming.continuous.ContinuousWriteRDD$$anonfun$compute$1.apply$mcV$sp(ContinuousWriteRDD.scala:52)
>  at 
> org.apache.spark.sql.execution.streaming.continuous.ContinuousWriteRDD$$anonfun$compute$1.apply(ContinuousWriteRDD.scala:51)
>  at 
> org.apache.spark.sql.execution.streaming.continuous.ContinuousWriteRDD$$anonfun$compute$1.apply(ContinuousWriteRDD.scala:51)
>  at 
> org.apache.spark.util.Utils$.tryWithSafeFinallyAndFailureCallbacks(Utils.scala:1394)
>  at 
> org.apache.spark.sql.execution.streaming.continuous.ContinuousWriteRDD.compute(ContinuousWriteRDD.scala:76)
>  at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324)
>  at org.apache.spark.rdd.RDD.iterator(RDD.scala:288)
>  at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:90)
>  at org.apache.spark.scheduler.Task.run(Task.scala:121)
>  at 
> org.apache.spark.executor.Executor$TaskRunner$$anonfun$10.apply(Executor.scala:402)
>  at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:1360)
>  at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:408)
>  at 
> java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
>  at 
> java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
>  at java.lang.Thread.run(Thread.java:748)
> 23:36:54.594 ERROR org.apache.spark.sql.execution.streaming.continuous.ContinuousWriteRDD: Writer for partition 1 is aborting.
> 23:36:54.594 ERROR org.apache.spark.sql.execution.streaming.continuous.ContinuousWriteRDD: Writer for partition 1 aborted.
> 23:36:54.595 ERROR org.apache.spark.executor.Executor: Exception in task 1.1 in stage 12.0 (TID 26)
> org.apache.spark.sql.execution.streaming.continuous.ContinuousTaskRetryException: Continuous execution does not support task retry
>  at 
> org.apache.spark.sql.execution.streaming.continuous.ContinuousDataSourceRDD.compute(ContinuousDataSourceRDD.scala:68)
>  at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324)
>  at org.apache.spark.rdd.RDD.iterator(RDD.scala:288)
>  at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
>  at 
> org.apache.spark.sql.execution.streaming.continuous.ContinuousWriteRDD$$anonfun$compute$1.apply$mcV$sp(ContinuousWriteRDD.scala:52)
>  at 
> org.apache.spark.sql.execution.streaming.continuous.ContinuousWriteRDD$$anonfun$compute$1.apply(ContinuousWriteRDD.scala:51)
>  at 
> org.apache.spark.sql.execution.streaming.continuous.ContinuousWriteRDD$$anonfun$compute$1.apply(ContinuousWriteRDD.scala:51)
>  at 
> org.apache.spark.util.Utils$.tryWithSafeFinallyAndFailureCallbacks(Utils.scala:1394)
>  at 
> org.apache.spark.sql.execution.streaming.continuous.ContinuousWriteRDD.compute(ContinuousWriteRDD.scala:76)
>  at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324)
>  at org.apache.spark.rdd.RDD.iterator(RDD.scala:288)
>  at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:90)
>  at org.apache.spark.scheduler.Task.run(Task.scala:121)
>  at 
> org.apache.spark.executor.Executor$TaskRunner$$anonfun$10.apply(Executor.scala:402)
>  at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:1360)
>  at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:408)
>  at 
> java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
>  at 
> java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
>
> - SPARK-10849: jdbc CreateTableColumnTypes duplicate columns
> - SPARK-10849: jdbc CreateTableColumnTypes invalid columns
> 23:38:30.300 ERROR org.apache.spark.executor.Executor: Exception in task 0.0 in stage 76.0 (TID 98)
> org.h2.jdbc.JdbcBatchUpdateException: NULL not allowed for column "NAME"; SQL statement:
> INSERT INTO TEST.PEOPLE1 ("NAME","THEID") VALUES (?,?) [23502-195]
>  at 
> org.h2.jdbc.JdbcPreparedStatement.executeBatch(JdbcPreparedStatement.java:1234)
>  at 
> org.apache.spark.sql.execution.datasources.jdbc.JdbcUtils$.savePartition(JdbcUtils.scala:672)
>  at 
> org.apache.spark.sql.execution.datasources.jdbc.JdbcUtils$$anonfun$saveTable$1.apply(JdbcUtils.scala:834)
>  at 
> org.apache.spark.sql.execution.datasources.jdbc.JdbcUtils$$anonfun$saveTable$1.apply(JdbcUtils.scala:834)
>  at 
> org.apache.spark.rdd.RDD$$anonfun$foreachPartition$1$$anonfun$apply$28.apply(RDD.scala:935)
>  at 
> org.apache.spark.rdd.RDD$$anonfun$foreachPartition$1$$anonfun$apply$28.apply(RDD.scala:935)
>  at 
> org.apache.spark.SparkContext$$anonfun$runJob$5.apply(SparkContext.scala:2101)
>  at 
> org.apache.spark.SparkContext$$anonfun$runJob$5.apply(SparkContext.scala:2101)
>  at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:90)
>  at org.apache.spark.scheduler.Task.run(Task.scala:121)
>  at 
> org.apache.spark.executor.Executor$TaskRunner$$anonfun$10.apply(Executor.scala:402)
>  at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:1360)
>  at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:408)
>  at 
> java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
>  at 
> java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
>  at java.lang.Thread.run(Thread.java:748)
> Caused by: org.h2.jdbc.JdbcSQLException: NULL not allowed for column "NAME"; SQL statement:
> INSERT INTO TEST.PEOPLE1 ("NAME","THEID") VALUES (?,?) [23502-195]
>  at org.h2.message.DbException.getJdbcSQLException(DbException.java:345)
>  at org.h2.message.DbException.get(DbException.java:179)
>  at org.h2.message.DbException.get(DbException.java:155)
>  at org.h2.table.Column.validateConvertUpdateSequence(Column.java:345)
>  at org.h2.table.Table.validateConvertUpdateSequence(Table.java:793)
>  at org.h2.command.dml.Insert.insertRows(Insert.java:151)
>  at org.h2.command.dml.Insert.update(Insert.java:114)
>  at org.h2.command.CommandContainer.update(CommandContainer.java:101)
>  at org.h2.command.Command.executeUpdate(Command.java:260)
>  at 
> org.h2.jdbc.JdbcPreparedStatement.executeUpdateInternal(JdbcPreparedStatement.java:164)
>  at 
> org.h2.jdbc.JdbcPreparedStatement.executeBatch(JdbcPreparedStatement.java:1215)
>  ... 15 more
> org.h2.jdbc.JdbcSQLException: NULL not allowed for column "NAME"; SQL statement:
> INSERT INTO TEST.PEOPLE1 ("NAME","THEID") VALUES (?,?) [23502-195]
>  at org.h2.message.DbException.getJdbcSQLException(DbException.java:345)
>  at org.h2.message.DbException.get(DbException.java:179)
>  at org.h2.message.DbException.get(DbException.java:155)
>  at org.h2.table.Column.validateConvertUpdateSequence(Column.java:345)
>  at org.h2.table.Table.validateConvertUpdateSequence(Table.java:793)
>  at org.h2.command.dml.Insert.insertRows(Insert.java:151)
>  at org.h2.command.dml.Insert.update(Insert.java:114)
>  at org.h2.command.CommandContainer.update(CommandContainer.java:101)
>  at org.h2.command.Command.executeUpdate(Command.java:260)
>  at 
> org.h2.jdbc.JdbcPreparedStatement.executeUpdateInternal(JdbcPreparedStatement.java:164)
>  at 
> org.h2.jdbc.JdbcPreparedStatement.executeBatch(JdbcPreparedStatement.java:1215)
>  at 
> org.apache.spark.sql.execution.datasources.jdbc.JdbcUtils$.savePartition(JdbcUtils.scala:672)
>  at 
> org.apache.spark.sql.execution.datasources.jdbc.JdbcUtils$$anonfun$saveTable$1.apply(JdbcUtils.scala:834)
>  at 
> org.apache.spark.sql.execution.datasources.jdbc.JdbcUtils$$anonfun$saveTable$1.apply(JdbcUtils.scala:834)
>  at 
> org.apache.spark.rdd.RDD$$anonfun$foreachPartition$1$$anonfun$apply$28.apply(RDD.scala:935)
>  at 
> org.apache.spark.rdd.RDD$$anonfun$foreachPartition$1$$anonfun$apply$28.apply(RDD.scala:935)
>  at 
> org.apache.spark.SparkContext$$anonfun$runJob$5.apply(SparkContext.scala:2101)
>  at 
> org.apache.spark.SparkContext$$anonfun$runJob$5.apply(SparkContext.scala:2101)
>  at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:90)
>  at org.apache.spark.scheduler.Task.run(Task.scala:121)
>  at 
> org.apache.spark.executor.Executor$TaskRunner$$anonfun$10.apply(Executor.scala:402)
>  at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:1360)
>  at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:408)
>  at 
> java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
>  at 
> java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
>  at java.lang.Thread.run(Thread.java:748)
> 23:38:30.305 WARN org.apache.spark.scheduler.TaskSetManager: Lost task 0.0 in stage 76.0 (TID 98, localhost, executor driver): org.h2.jdbc.JdbcBatchUpdateException: NULL not allowed for column "NAME"; SQL statement:
> INSERT INTO TEST.PEOPLE1 ("NAME","THEID") VALUES (?,?) [23502-195]
>  at 
> org.h2.jdbc.JdbcPreparedStatement.executeBatch(JdbcPreparedStatement.java:1234)
>  at 
> org.apache.spark.sql.execution.datasources.jdbc.JdbcUtils$.savePartition(JdbcUtils.scala:672)
>  at 
> org.apache.spark.sql.execution.datasources.jdbc.JdbcUtils$$anonfun$saveTable$1.apply(JdbcUtils.scala:834)
>  at 
> org.apache.spark.sql.execution.datasources.jdbc.JdbcUtils$$anonfun$saveTable$1.apply(JdbcUtils.scala:834)
>  at 
> org.apache.spark.rdd.RDD$$anonfun$foreachPartition$1$$anonfun$apply$28.apply(RDD.scala:935)
>  at 
> org.apache.spark.rdd.RDD$$anonfun$foreachPartition$1$$anonfun$apply$28.apply(RDD.scala:935)
>  at 
> org.apache.spark.SparkContext$$anonfun$runJob$5.apply(SparkContext.scala:2101)
>  at 
> org.apache.spark.SparkContext$$anonfun$runJob$5.apply(SparkContext.scala:2101)
>  at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:90)
>  at org.apache.spark.scheduler.Task.run(Task.scala:121)
>  at 
> org.apache.spark.executor.Executor$TaskRunner$$anonfun$10.apply(Executor.scala:402)
>  at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:1360)
>  at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:408)
>  at 
> java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
>  at 
> java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
>  at java.lang.Thread.run(Thread.java:748)
> Caused by: org.h2.jdbc.JdbcSQLException: NULL not allowed for column "NAME"; SQL statement:
> INSERT INTO TEST.PEOPLE1 ("NAME","THEID") VALUES (?,?) [23502-195]
>  at org.h2.message.DbException.getJdbcSQLException(DbException.java:345)
>  at org.h2.message.DbException.get(DbException.java:179)
>  at org.h2.message.DbException.get(DbException.java:155)
>  at org.h2.table.Column.validateConvertUpdateSequence(Column.java:345)
>  at org.h2.table.Table.validateConvertUpdateSequence(Table.java:793)
>  at org.h2.command.dml.Insert.insertRows(Insert.java:151)
>  at org.h2.command.dml.Insert.update(Insert.java:114)
>  at org.h2.command.CommandContainer.update(CommandContainer.java:101)
>  at org.h2.command.Command.executeUpdate(Command.java:260)
>  at 
> org.h2.jdbc.JdbcPreparedStatement.executeUpdateInternal(JdbcPreparedStatement.java:164)
>  at 
> org.h2.jdbc.JdbcPreparedStatement.executeBatch(JdbcPreparedStatement.java:1215)
>  ... 15 more
> org.h2.jdbc.JdbcSQLException: NULL not allowed for column "NAME"; SQL statement:
> INSERT INTO TEST.PEOPLE1 ("NAME","THEID") VALUES (?,?) [23502-195]
>  at org.h2.message.DbException.getJdbcSQLException(DbException.java:345)
>  at org.h2.message.DbException.get(DbException.java:179)
>  at org.h2.message.DbException.get(DbException.java:155)
>  at org.h2.table.Column.validateConvertUpdateSequence(Column.java:345)
>  at org.h2.table.Table.validateConvertUpdateSequence(Table.java:793)
>  at org.h2.command.dml.Insert.insertRows(Insert.java:151)
>  at org.h2.command.dml.Insert.update(Insert.java:114)
>  at org.h2.command.CommandContainer.update(CommandContainer.java:101)
>  at org.h2.command.Command.executeUpdate(Command.java:260)
>  at 
> org.h2.jdbc.JdbcPreparedStatement.executeUpdateInternal(JdbcPreparedStatement.java:164)
>  at 
> org.h2.jdbc.JdbcPreparedStatement.executeBatch(JdbcPreparedStatement.java:1215)
>  at 
> org.apache.spark.sql.execution.datasources.jdbc.JdbcUtils$.savePartition(JdbcUtils.scala:672)
>  at 
> org.apache.spark.sql.execution.datasources.jdbc.JdbcUtils$$anonfun$saveTable$1.apply(JdbcUtils.scala:834)
>  at 
> org.apache.spark.sql.execution.datasources.jdbc.JdbcUtils$$anonfun$saveTable$1.apply(JdbcUtils.scala:834)
>  at 
> org.apache.spark.rdd.RDD$$anonfun$foreachPartition$1$$anonfun$apply$28.apply(RDD.scala:935)
>  at 
> org.apache.spark.rdd.RDD$$anonfun$foreachPartition$1$$anonfun$apply$28.apply(RDD.scala:935)
>  at 
> org.apache.spark.SparkContext$$anonfun$runJob$5.apply(SparkContext.scala:2101)
>  at 
> org.apache.spark.SparkContext$$anonfun$runJob$5.apply(SparkContext.scala:2101)
>  at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:90)
>  at org.apache.spark.scheduler.Task.run(Task.scala:121)
>  at 
> org.apache.spark.executor.Executor$TaskRunner$$anonfun$10.apply(Executor.scala:402)
>  at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:1360)
>  at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:408)
>  at 
> java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
>  at 
> java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
>  at java.lang.Thread.run(Thread.java:748)
> 23:38:30.305 ERROR org.apache.spark.scheduler.TaskSetManager: Task 0 in stage 76.0 failed 1 times; aborting job
> - SPARK-19726: INSERT null to a NOT NULL column
> - SPARK-23856 Spark jdbc setQueryTimeout option !!! IGNORED !!!
> OuterJoinSuite:
>  
>  
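
For context on the H2 errors in the quoted log: the "NULL not allowed for column "NAME"" failures are raised inside JdbcUtils.savePartition while a batched INSERT runs against an H2 table whose column is NOT NULL, and they appear immediately before the "- SPARK-19726: INSERT null to a NOT NULL column" result line, so they look like the failure that test provokes on purpose rather than an unexpected error. A rough, self-contained sketch of that scenario (illustrative only; the URL, table definition and data below are made up, not the suite's fixtures, and it assumes the H2 driver is on the classpath):

import org.apache.spark.sql.{SaveMode, SparkSession}

object NullIntoNotNullSketch {
  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder()
      .master("local[2]")
      .appName("null-into-not-null-sketch")
      .getOrCreate()
    import spark.implicits._

    // Hypothetical in-memory H2 database, not the one used by the suite.
    val url = "jdbc:h2:mem:sketchdb;DB_CLOSE_DELAY=-1"

    // Target table whose NAME column rejects NULLs.
    val conn = java.sql.DriverManager.getConnection(url)
    conn.createStatement().executeUpdate(
      "CREATE TABLE PEOPLE1 (NAME VARCHAR(32) NOT NULL, THEID INT)")
    conn.close()

    // A null NAME makes the batched INSERT fail inside JdbcUtils.savePartition
    // with an H2 batch-update exception, as in the log above.
    val df = Seq((null.asInstanceOf[String], 1), ("fred", 2)).toDF("NAME", "THEID")
    try {
      df.write
        .mode(SaveMode.Append)
        .format("jdbc")
        .option("url", url)
        .option("dbtable", "PEOPLE1")
        .save()
    } catch {
      case e: Exception => println(s"expected failure: ${e.getMessage}")
    }

    spark.stop()
  }
}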


