[ https://issues.apache.org/jira/browse/SPARK-37808?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=17469037#comment-17469037 ]

jerryMa edited comment on SPARK-37808 at 1/5/22, 6:28 AM:
----------------------------------------------------------

[~hyukjin.kwon] Thanks, I'll provide an example for you to test later, after
removing the sensitive information.
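
In the meantime, here is a minimal sketch of the failing pattern. Everything in it is a hypothetical stand-in (src1, src2, dst, corp_name, dt are not the real names); the assumption, taken from the stack trace, is that the statement inserts a UNION into a Hive table while referencing a dt column that does not exist:

{code:scala}
// Hypothetical repro sketch; table and column names are stand-ins.
import org.apache.spark.sql.SparkSession

object Repro extends App {
  val spark = SparkSession.builder()
    .appName("SPARK-37808-repro")
    .enableHiveSupport() // HiveAnalysis only runs with Hive support enabled
    .getOrCreate()

  spark.sql("CREATE TABLE src1 (corp_name STRING)")
  spark.sql("CREATE TABLE src2 (corp_name STRING)")
  spark.sql("CREATE TABLE dst (corp_name STRING)")

  // 'dt' deliberately does not exist in src1. Expected: a plain
  // "cannot resolve 'dt'" analysis error; observed (per the trace):
  // UnresolvedException from Union.output inside HiveAnalysis.
  spark.sql(
    """INSERT OVERWRITE TABLE dst
      |SELECT corp_name FROM src1 WHERE dt = '2022-01-04'
      |UNION ALL
      |SELECT corp_name FROM src2""".stripMargin)
}
{code}

The same statements can be pasted into spark-sql directly; the wrapper object is only there to make the sketch self-contained.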


was (Author: JIRAUSER283003):
[~hyukjin.kwon] Thanks, I'll provide an example for you to test after
removing the sensitive information

> Invalid call to nullable on unresolved object, tree: 'corp_name
> ---------------------------------------------------------------
>
>                 Key: SPARK-37808
>                 URL: https://issues.apache.org/jira/browse/SPARK-37808
>             Project: Spark
>          Issue Type: Bug
>          Components: SQL
>    Affects Versions: 2.4.2, 3.0.0, 3.2.0
>            Reporter: jerryMa
>            Priority: Major
>
> I used spark-sql to run a SQL file, but it fails with the error below:
> catalyst.analysis.UnresolvedException: Invalid call to nullable on unresolved
> object, tree: 'corp_name. The real cause is that a column (dt) referenced in
> the SQL does not exist. I also tested the SQL on Spark 3.0 and 3.2, and both
> fail with the same error. Can somebody explain this? A sketch of my current
> understanding follows the stack trace.
> {code:java}
> org.apache.spark.sql.catalyst.analysis.UnresolvedException: Invalid call to nullable on unresolved object, tree: 'corp_name
>       at org.apache.spark.sql.catalyst.analysis.UnresolvedAttribute.nullable(unresolved.scala:106)
>       at org.apache.spark.sql.catalyst.plans.logical.Union.$anonfun$output$5(basicLogicalOperators.scala:235)
>       at org.apache.spark.sql.catalyst.plans.logical.Union.$anonfun$output$5$adapted(basicLogicalOperators.scala:235)
>       at scala.collection.LinearSeqOptimized.exists(LinearSeqOptimized.scala:95)
>       at scala.collection.LinearSeqOptimized.exists$(LinearSeqOptimized.scala:92)
>       at scala.collection.immutable.List.exists(List.scala:89)
>       at org.apache.spark.sql.catalyst.plans.logical.Union.$anonfun$output$4(basicLogicalOperators.scala:235)
>       at scala.collection.TraversableLike.$anonfun$map$1(TraversableLike.scala:237)
>       at scala.collection.immutable.List.foreach(List.scala:392)
>       at scala.collection.TraversableLike.map(TraversableLike.scala:237)
>       at scala.collection.TraversableLike.map$(TraversableLike.scala:230)
>       at scala.collection.immutable.List.map(List.scala:298)
>       at org.apache.spark.sql.catalyst.plans.logical.Union.output(basicLogicalOperators.scala:234)
>       at org.apache.spark.sql.hive.HiveAnalysis$$anonfun$apply$3.applyOrElse(HiveStrategies.scala:160)
>       at org.apache.spark.sql.hive.HiveAnalysis$$anonfun$apply$3.applyOrElse(HiveStrategies.scala:148)
>       at org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper.$anonfun$resolveOperatorsDown$2(AnalysisHelper.scala:108)
>       at org.apache.spark.sql.catalyst.trees.CurrentOrigin$.withOrigin(TreeNode.scala:70)
>       at org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper.$anonfun$resolveOperatorsDown$1(AnalysisHelper.scala:108)
>       at org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper$.allowInvokingTransformsInAnalyzer(AnalysisHelper.scala:194)
>       at org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper.resolveOperatorsDown(AnalysisHelper.scala:106)
>       at org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper.resolveOperatorsDown$(AnalysisHelper.scala:104)
>       at org.apache.spark.sql.catalyst.plans.logical.LogicalPlan.resolveOperatorsDown(LogicalPlan.scala:29)
>       at org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper.resolveOperators(AnalysisHelper.scala:73)
>       at org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper.resolveOperators$(AnalysisHelper.scala:72)
>       at org.apache.spark.sql.catalyst.plans.logical.LogicalPlan.resolveOperators(LogicalPlan.scala:29)
>       at org.apache.spark.sql.hive.HiveAnalysis$.apply(HiveStrategies.scala:148)
>       at org.apache.spark.sql.hive.HiveAnalysis$.apply(HiveStrategies.scala:147)
>       at org.apache.spark.sql.catalyst.rules.RuleExecutor.$anonfun$execute$2(RuleExecutor.scala:87)
>       at scala.collection.IndexedSeqOptimized.foldLeft(IndexedSeqOptimized.scala:60)
>       at scala.collection.IndexedSeqOptimized.foldLeft$(IndexedSeqOptimized.scala:68)
>       at scala.collection.mutable.ArrayBuffer.foldLeft(ArrayBuffer.scala:49)
>       at org.apache.spark.sql.catalyst.rules.RuleExecutor.$anonfun$execute$1(RuleExecutor.scala:84)
>       at org.apache.spark.sql.catalyst.rules.RuleExecutor.$anonfun$execute$1$adapted(RuleExecutor.scala:76)
>       at scala.collection.immutable.List.foreach(List.scala:392)
>       at org.apache.spark.sql.catalyst.rules.RuleExecutor.execute(RuleExecutor.scala:76)
>       at org.apache.spark.sql.catalyst.analysis.Analyzer.org$apache$spark$sql$catalyst$analysis$Analyzer$$executeSameContext(Analyzer.scala:127)
>       at org.apache.spark.sql.catalyst.analysis.Analyzer.execute(Analyzer.scala:121)
>       at org.apache.spark.sql.catalyst.analysis.Analyzer.$anonfun$executeAndCheck$1(Analyzer.scala:106)
>       at org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper$.markInAnalyzer(AnalysisHelper.scala:201)
>       at org.apache.spark.sql.catalyst.analysis.Analyzer.executeAndCheck(Analyzer.scala:105)
>       at org.apache.spark.sql.execution.QueryExecution.analyzed$lzycompute(QueryExecution.scala:57)
>       at org.apache.spark.sql.execution.QueryExecution.analyzed(QueryExecution.scala:55)
>       at org.apache.spark.sql.execution.QueryExecution.assertAnalyzed(QueryExecution.scala:47)
>       at org.apache.spark.sql.Dataset$.ofRows(Dataset.scala:78)
>       at org.apache.spark.sql.SparkSession.sql(SparkSession.scala:642)
>       at org.apache.spark.sql.SQLContext.sql(SQLContext.scala:694)
>       at org.apache.spark.sql.hive.thriftserver.SparkSQLDriver.run(SparkSQLDriver.scala:62)
>       at org.apache.spark.sql.hive.thriftserver.SparkSQLCLIDriver.processCmd(SparkSQLCLIDriver.scala:445)
>       at org.apache.spark.sql.hive.thriftserver.SparkSQLCLIDriver.$anonfun$processLine$1(SparkSQLCLIDriver.scala:385)
>       at org.apache.spark.sql.hive.thriftserver.SparkSQLCLIDriver.$anonfun$processLine$1$adapted(SparkSQLCLIDriver.scala:382)
>       at scala.collection.IndexedSeqOptimized.foreach(IndexedSeqOptimized.scala:36)
>       at scala.collection.IndexedSeqOptimized.foreach$(IndexedSeqOptimized.scala:33)
>       at scala.collection.mutable.ArrayOps$ofRef.foreach(ArrayOps.scala:198)
>       at org.apache.spark.sql.hive.thriftserver.SparkSQLCLIDriver.processLine(SparkSQLCLIDriver.scala:382)
>       at org.apache.hadoop.hive.cli.CliDriver.processLine(CliDriver.java:311)
>       at org.apache.hadoop.hive.cli.CliDriver.processReader(CliDriver.java:409)
>       at org.apache.hadoop.hive.cli.CliDriver.processFile(CliDriver.java:425)
>       at org.apache.spark.sql.hive.thriftserver.SparkSQLCLIDriver$.main(SparkSQLCLIDriver.scala:200)
>       at org.apache.spark.sql.hive.thriftserver.SparkSQLCLIDriver.main(SparkSQLCLIDriver.scala)
>       at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
>       at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
>       at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
>       at java.lang.reflect.Method.invoke(Method.java:498)
>       at org.apache.spark.deploy.JavaMainApplication.start(SparkApplication.scala:52)
>       at org.apache.spark.deploy.SparkSubmit.org$apache$spark$deploy$SparkSubmit$$runMain(SparkSubmit.scala:849)
>       at org.apache.spark.deploy.SparkSubmit.doRunMain$1(SparkSubmit.scala:167)
>       at org.apache.spark.deploy.SparkSubmit.submit(SparkSubmit.scala:195)
>       at org.apache.spark.deploy.SparkSubmit.doSubmit(SparkSubmit.scala:86)
>       at org.apache.spark.deploy.SparkSubmit$$anon$2.doSubmit(SparkSubmit.scala:924)
>       at org.apache.spark.deploy.SparkSubmit$.main(SparkSubmit.scala:933)
>       at org.apache.spark.deploy.SparkSubmit.main(SparkSubmit.scala)
> 22/01/04 18:43:44 INFO YarnClientSchedulerBackend: Interrupting monitor thread
> 22/01/04 18:43:44 INFO YarnClientSchedulerBackend: Shutting down all executors
> 22/01/04 18:43:44 INFO YarnClientSchedulerBackend: Stopped {code}
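>
> What seems to happen, modeled here as a simplified, hypothetical sketch (these classes are stand-ins, not the actual Catalyst source): an unresolved attribute throws on any schema query such as nullable, so an analyzer rule that asks a Union for its output before the children are resolved fails with exactly this error instead of the expected "cannot resolve" message.
> {code:scala}
> // Hypothetical model of the failure mode; not Spark source code.
> sealed trait Attribute { def nullable: Boolean }
>
> case class ResolvedAttribute(name: String, nullable: Boolean) extends Attribute
>
> case class UnresolvedAttribute(name: String) extends Attribute {
>   // Mirrors Catalyst's behavior: schema queries on unresolved nodes throw.
>   def nullable: Boolean = throw new UnsupportedOperationException(
>     s"Invalid call to nullable on unresolved object, tree: '$name")
> }
>
> object Demo extends App {
>   // The Union's children never resolved, so its output is still unresolved.
>   val unionOutput: List[Attribute] = List(UnresolvedAttribute("corp_name"))
>   // Asking for nullability before analysis finishes reproduces the error.
>   unionOutput.map(_.nullable)
> }
> {code}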


