chopperChen created SPARK-38884:
-----------------------------------

             Summary: java.util.NoSuchElementException: key not found: numPartitions
                 Key: SPARK-38884
                 URL: https://issues.apache.org/jira/browse/SPARK-38884
             Project: Spark
          Issue Type: Bug
          Components: SQL
    Affects Versions: 3.0.1
         Environment: hadoop 3.1.1, spark 3.0.1
            Reporter: chopperChen


When running a query with spark.sql(...), the logs print *java.util.NoSuchElementException: key not found: numPartitions*.

My SQL looks like this:

 
{code:sql}
-- hr is a partition column
select * from (select col1, '24' as hr from table1
               union all select col1, '2' as hr from table2
               union all select col1, hr from table3) df1
inner join (select col1, '24' as hr from table4
            union all select col1, '2' as hr from table5
            union all select col1, hr from table6) df2
on df1.col1=df2.col1
{code}
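
For context, the statement above is submitted through spark.sql from a Scala job. A minimal reproduction sketch along those lines is below; the table names table1..table6 and the SparkSession settings are placeholders, and it assumes the tables are already registered in the catalog as partitioned tables with hr as the partition column.

{code:scala}
import org.apache.spark.sql.SparkSession

// Hypothetical reproduction sketch for this report: table1..table6 stand in for
// the real partitioned tables; hr is the partition column on table3/table6.
object Spark38884Repro {
  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder()
      .appName("SPARK-38884-repro")
      .enableHiveSupport()
      .getOrCreate()

    val df = spark.sql(
      """select * from (select col1, '24' as hr from table1
        |               union all select col1, '2' as hr from table2
        |               union all select col1, hr from table3) df1
        |inner join (select col1, '24' as hr from table4
        |            union all select col1, '2' as hr from table5
        |            union all select col1, hr from table6) df2
        |on df1.col1 = df2.col1""".stripMargin)

    // The exception below is raised when the plan is actually executed.
    df.show()
  }
}
{code}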
 

*exception:*

{code:java}
Caused by: java.util.NoSuchElementException: key not found: numPartitions
    at scala.collection.MapLike.default(MapLike.scala:235)
    at scala.collection.MapLike.default$(MapLike.scala:234)
    at scala.collection.AbstractMap.default(Map.scala:63)
    at scala.collection.MapLike.apply(MapLike.scala:144)
    at scala.collection.MapLike.apply$(MapLike.scala:143)
    at scala.collection.AbstractMap.apply(Map.scala:63)
    at org.apache.spark.sql.execution.FileSourceScanExec.$anonfun$sendDriverMetrics$1(DataSourceScanExec.scala:197)
    at org.apache.spark.sql.execution.FileSourceScanExec.$anonfun$sendDriverMetrics$1$adapted(DataSourceScanExec.scala:197)
    at scala.collection.mutable.HashMap.$anonfun$foreach$1(HashMap.scala:149)
    at scala.collection.mutable.HashTable.foreachEntry(HashTable.scala:237)
    at scala.collection.mutable.HashTable.foreachEntry$(HashTable.scala:230)
    at scala.collection.mutable.HashMap.foreachEntry(HashMap.scala:44)
    at scala.collection.mutable.HashMap.foreach(HashMap.scala:149)
    at org.apache.spark.sql.execution.FileSourceScanExec.sendDriverMetrics(DataSourceScanExec.scala:197)
    at org.apache.spark.sql.execution.FileSourceScanExec.inputRDD$lzycompute(DataSourceScanExec.scala:407)
    at org.apache.spark.sql.execution.FileSourceScanExec.inputRDD(DataSourceScanExec.scala:390)
    at org.apache.spark.sql.execution.FileSourceScanExec.doExecuteColumnar(DataSourceScanExec.scala:485)
    at org.apache.spark.sql.execution.SparkPlan.$anonfun$executeColumnar$1(SparkPlan.scala:202)
    at org.apache.spark.sql.execution.SparkPlan.$anonfun$executeQuery$1(SparkPlan.scala:213)
    at org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:151)
    at org.apache.spark.sql.execution.SparkPlan.executeQuery(SparkPlan.scala:210)
    at org.apache.spark.sql.execution.SparkPlan.executeColumnar(SparkPlan.scala:198)
    at org.apache.spark.sql.execution.InputAdapter.doExecuteColumnar(WholeStageCodegenExec.scala:519)
    at org.apache.spark.sql.execution.SparkPlan.$anonfun$executeColumnar$1(SparkPlan.scala:202)
    at org.apache.spark.sql.execution.SparkPlan.$anonfun$executeQuery$1(SparkPlan.scala:213)
    at org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:151)
    at org.apache.spark.sql.execution.SparkPlan.executeQuery(SparkPlan.scala:210)
    at org.apache.spark.sql.execution.SparkPlan.executeColumnar(SparkPlan.scala:198)
    at org.apache.spark.sql.execution.ColumnarToRowExec.inputRDDs(Columnar.scala:196)
    at org.apache.spark.sql.execution.FilterExec.inputRDDs(basicPhysicalOperators.scala:133)
    at org.apache.spark.sql.execution.ProjectExec.inputRDDs(basicPhysicalOperators.scala:47)
    at org.apache.spark.sql.execution.WholeStageCodegenExec.doExecute(WholeStageCodegenExec.scala:720)
    at org.apache.spark.sql.execution.SparkPlan.$anonfun$execute$1(SparkPlan.scala:175)
    at org.apache.spark.sql.execution.SparkPlan.$anonfun$executeQuery$1(SparkPlan.scala:213)
    at org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:151)
    at org.apache.spark.sql.execution.SparkPlan.executeQuery(SparkPlan.scala:210)
    at org.apache.spark.sql.execution.SparkPlan.execute(SparkPlan.scala:171)
    at org.apache.spark.sql.execution.UnionExec.$anonfun$doExecute$5(basicPhysicalOperators.scala:644)
    at scala.collection.TraversableLike.$anonfun$map$1(TraversableLike.scala:238)
    at scala.collection.mutable.ResizableArray.foreach(ResizableArray.scala:62)
    at scala.collection.mutable.ResizableArray.foreach$(ResizableArray.scala:55)
    at scala.collection.mutable.ArrayBuffer.foreach(ArrayBuffer.scala:49)
    at scala.collection.TraversableLike.map(TraversableLike.scala:238)
    at scala.collection.TraversableLike.map$(TraversableLike.scala:231)
    at scala.collection.AbstractTraversable.map(Traversable.scala:108)
    at org.apache.spark.sql.execution.UnionExec.doExecute(basicPhysicalOperators.scala:644)
    at org.apache.spark.sql.execution.SparkPlan.$anonfun$execute$1(SparkPlan.scala:175)
    at org.apache.spark.sql.execution.SparkPlan.$anonfun$executeQuery$1(SparkPlan.scala:213)
    at org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:151)
    at org.apache.spark.sql.execution.SparkPlan.executeQuery(SparkPlan.scala:210)
    at org.apache.spark.sql.execution.SparkPlan.execute(SparkPlan.scala:171)
    at org.apache.spark.sql.execution.InputAdapter.inputRDD(WholeStageCodegenExec.scala:525)
    at org.apache.spark.sql.execution.InputRDDCodegen.inputRDDs(WholeStageCodegenExec.scala:453)
    at org.apache.spark.sql.execution.InputRDDCodegen.inputRDDs$(WholeStageCodegenExec.scala:452)
    at org.apache.spark.sql.execution.InputAdapter.inputRDDs(WholeStageCodegenExec.scala:496)
    at org.apache.spark.sql.execution.joins.BroadcastHashJoinExec.inputRDDs(BroadcastHashJoinExec.scala:74)
    at org.apache.spark.sql.execution.ProjectExec.inputRDDs(basicPhysicalOperators.scala:47)
    at org.apache.spark.sql.execution.joins.BroadcastHashJoinExec.inputRDDs(BroadcastHashJoinExec.scala:74)
    at org.apache.spark.sql.execution.ProjectExec.inputRDDs(basicPhysicalOperators.scala:47)
    at org.apache.spark.sql.execution.joins.BroadcastHashJoinExec.inputRDDs(BroadcastHashJoinExec.scala:74)
    at org.apache.spark.sql.execution.ProjectExec.inputRDDs(basicPhysicalOperators.scala:47)
    at org.apache.spark.sql.execution.joins.BroadcastHashJoinExec.inputRDDs(BroadcastHashJoinExec.scala:74)
    at org.apache.spark.sql.execution.ProjectExec.inputRDDs(basicPhysicalOperators.scala:47)
    at org.apache.spark.sql.execution.WholeStageCodegenExec.doExecute(WholeStageCodegenExec.scala:720)
    at org.apache.spark.sql.execution.SparkPlan.$anonfun$execute$1(SparkPlan.scala:175)
    at org.apache.spark.sql.execution.SparkPlan.$anonfun$executeQuery$1(SparkPlan.scala:213)
    at org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:151)
    at org.apache.spark.sql.execution.SparkPlan.executeQuery(SparkPlan.scala:210)
    at org.apache.spark.sql.execution.SparkPlan.execute(SparkPlan.scala:171)
    at org.apache.spark.sql.execution.exchange.ShuffleExchangeExec.inputRDD$lzycompute(ShuffleExchangeExec.scala:106)
    at org.apache.spark.sql.execution.exchange.ShuffleExchangeExec.inputRDD(ShuffleExchangeExec.scala:106)
    at org.apache.spark.sql.execution.exchange.ShuffleExchangeExec.shuffleDependency$lzycompute(ShuffleExchangeExec.scala:139)
    at org.apache.spark.sql.execution.exchange.ShuffleExchangeExec.shuffleDependency(ShuffleExchangeExec.scala:137)
    at org.apache.spark.sql.execution.exchange.ShuffleExchangeExec.$anonfun$doExecute$1(ShuffleExchangeExec.scala:154)
    at org.apache.spark.sql.catalyst.errors.package$.attachTree(package.scala:52)
{code}
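
The top frames point at FileSourceScanExec.sendDriverMetrics, which looks up each collected driver-side metric by name in the operator's metrics map; "key not found: numPartitions" is simply what a Scala Map raises when asked for a key it does not contain, as in this standalone sketch (the map contents here are made up for illustration):

{code:scala}
// Standalone illustration (not Spark source): Map.apply throws
// java.util.NoSuchElementException("key not found: ...") for a missing key.
object MissingKeyDemo {
  def main(args: Array[String]): Unit = {
    val metrics: Map[String, Long] = Map("numFiles" -> 3L, "numOutputRows" -> 42L)

    println(metrics("numFiles"))      // ok, key exists
    println(metrics("numPartitions")) // throws: key not found: numPartitions
  }
}
{code}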


