[ https://issues.apache.org/jira/browse/SPARK-24696?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel ]

Maryann Xue updated SPARK-24696:
--------------------------------
    Description: 
The following test case would cause a "max iterations reached" error, i.e., an 
infinite loop in the optimizer.
{code}
  test("optimization infinite loop") {
    withTempDir { dir =>
      val path = dir.getCanonicalPath
      import testImplicits._
      Seq((3, "a")).toDF("id", "value")
        .write.format("parquet").mode("overwrite").save(path)
      import org.apache.spark.sql.functions.udf
      // Nondeterministic UDF: the optimizer must not reorder it with the
      // deterministic predicate 'id < 0 below.
      val filterIt = udf((value: Int) => value > 0).asNondeterministic
      spark.read.load(path)
        .where(filterIt('id))
        .where('id < 0)
        .select('id)
        .collect
    }
  }
{code}
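For reference, the same loop reproduces outside the test harness, e.g. in spark-shell on Spark 2.3.1. A minimal sketch adapted from the test above (the scratch path is illustrative; spark is the shell's session):
{code}
import org.apache.spark.sql.functions.udf
import spark.implicits._

// Illustrative scratch path; any writable location works.
val path = "/tmp/spark-24696-repro"
Seq((3, "a")).toDF("id", "value").write.format("parquet").mode("overwrite").save(path)

// Nondeterministic UDF, as in the test above.
val filterIt = udf((value: Int) => value > 0).asNondeterministic

spark.read.load(path)
  .where(filterIt('id))
  .where('id < 0)
  .select('id)
  .collect   // fails in the optimizer with "Max iterations (100) reached"
{code}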
Error message:
{code}
Max iterations (100) reached for batch Operator Optimization before Inferring Filters, tree:
Filter (id#11 < 0)
+- Project [id#11]
   +- Filter if (isnull(id#11)) null else UDF(id#11)
      +- Relation[id#11,value#12] parquet

org.apache.spark.sql.catalyst.errors.package$TreeNodeException: Max iterations (100) reached for batch Operator Optimization before Inferring Filters, tree:
Filter (id#11 < 0)
+- Project [id#11]
   +- Filter if (isnull(id#11)) null else UDF(id#11)
      +- Relation[id#11,value#12] parquet

        at org.apache.spark.sql.catalyst.rules.RuleExecutor$$anonfun$execute$1.apply(RuleExecutor.scala:117)
        at org.apache.spark.sql.catalyst.rules.RuleExecutor$$anonfun$execute$1.apply(RuleExecutor.scala:76)
        at scala.collection.immutable.List.foreach(List.scala:392)
        at org.apache.spark.sql.catalyst.rules.RuleExecutor.execute(RuleExecutor.scala:76)
        at org.apache.spark.sql.execution.QueryExecution.optimizedPlan$lzycompute(QueryExecution.scala:66)
        at org.apache.spark.sql.execution.QueryExecution.optimizedPlan(QueryExecution.scala:66)
        at org.apache.spark.sql.execution.QueryExecution.sparkPlan$lzycompute(QueryExecution.scala:72)
        at org.apache.spark.sql.execution.QueryExecution.sparkPlan(QueryExecution.scala:68)
        at org.apache.spark.sql.execution.QueryExecution.executedPlan$lzycompute(QueryExecution.scala:77)
        at org.apache.spark.sql.execution.QueryExecution.executedPlan(QueryExecution.scala:77)
        at org.apache.spark.sql.Dataset.withAction(Dataset.scala:3286)
        at org.apache.spark.sql.Dataset.collect(Dataset.scala:2745)
        at org.apache.spark.sql.DataFrameSuite$$anonfun$94$$anonfun$apply$mcV$sp$199.apply(DataFrameSuite.scala:2343)
        at org.apache.spark.sql.DataFrameSuite$$anonfun$94$$anonfun$apply$mcV$sp$199.apply(DataFrameSuite.scala:2333)
        at org.apache.spark.sql.test.SQLTestUtilsBase$class.withTempDir(SQLTestUtils.scala:215)
        at org.apache.spark.sql.DataFrameSuite.withTempDir(DataFrameSuite.scala:43)
        at org.apache.spark.sql.DataFrameSuite$$anonfun$94.apply$mcV$sp(DataFrameSuite.scala:2333)
        at org.apache.spark.sql.DataFrameSuite$$anonfun$94.apply(DataFrameSuite.scala:2333)
        at org.apache.spark.sql.DataFrameSuite$$anonfun$94.apply(DataFrameSuite.scala:2333)
        at org.scalatest.OutcomeOf$class.outcomeOf(OutcomeOf.scala:85)
        at org.scalatest.OutcomeOf$.outcomeOf(OutcomeOf.scala:104)
        at org.scalatest.Transformer.apply(Transformer.scala:22)
        at org.scalatest.Transformer.apply(Transformer.scala:20)
        at org.scalatest.FunSuiteLike$$anon$1.apply(FunSuiteLike.scala:186)
        at org.apache.spark.SparkFunSuite.withFixture(SparkFunSuite.scala:103)
        at org.scalatest.FunSuiteLike$class.invokeWithFixture$1(FunSuiteLike.scala:183)
        at org.scalatest.FunSuiteLike$$anonfun$runTest$1.apply(FunSuiteLike.scala:196)
        at org.scalatest.FunSuiteLike$$anonfun$runTest$1.apply(FunSuiteLike.scala:196)
        at org.scalatest.SuperEngine.runTestImpl(Engine.scala:289)
        at org.scalatest.FunSuiteLike$class.runTest(FunSuiteLike.scala:196)
        at org.apache.spark.sql.DataFrameSuite.org$scalatest$BeforeAndAfterEach$$super$runTest(DataFrameSuite.scala:43)
        at org.scalatest.BeforeAndAfterEach$class.runTest(BeforeAndAfterEach.scala:221)
        at org.apache.spark.sql.DataFrameSuite.runTest(DataFrameSuite.scala:43)
        at org.scalatest.FunSuiteLike$$anonfun$runTests$1.apply(FunSuiteLike.scala:229)
        at org.scalatest.FunSuiteLike$$anonfun$runTests$1.apply(FunSuiteLike.scala:229)
        at org.scalatest.SuperEngine$$anonfun$traverseSubNodes$1$1.apply(Engine.scala:396)
        at org.scalatest.SuperEngine$$anonfun$traverseSubNodes$1$1.apply(Engine.scala:384)
        at scala.collection.immutable.List.foreach(List.scala:392)
        at org.scalatest.SuperEngine.traverseSubNodes$1(Engine.scala:384)
        at org.scalatest.SuperEngine.org$scalatest$SuperEngine$$runTestsInBranch(Engine.scala:379)
        at org.scalatest.SuperEngine.runTestsImpl(Engine.scala:461)
        at org.scalatest.FunSuiteLike$class.runTests(FunSuiteLike.scala:229)
        at org.scalatest.FunSuite.runTests(FunSuite.scala:1560)
        at org.scalatest.Suite$class.run(Suite.scala:1147)
        at org.scalatest.FunSuite.org$scalatest$FunSuiteLike$$super$run(FunSuite.scala:1560)
        at org.scalatest.FunSuiteLike$$anonfun$run$1.apply(FunSuiteLike.scala:233)
        at org.scalatest.FunSuiteLike$$anonfun$run$1.apply(FunSuiteLike.scala:233)
        at org.scalatest.SuperEngine.runImpl(Engine.scala:521)
        at org.scalatest.FunSuiteLike$class.run(FunSuiteLike.scala:233)
        at org.apache.spark.SparkFunSuite.org$scalatest$BeforeAndAfterAll$$super$run(SparkFunSuite.scala:52)
        at org.scalatest.BeforeAndAfterAll$class.liftedTree1$1(BeforeAndAfterAll.scala:213)
        at org.scalatest.BeforeAndAfterAll$class.run(BeforeAndAfterAll.scala:210)
        at org.apache.spark.SparkFunSuite.run(SparkFunSuite.scala:52)
        at org.scalatest.tools.SuiteRunner.run(SuiteRunner.scala:45)
        at org.scalatest.tools.Runner$$anonfun$doRunRunRunDaDoRunRun$1.apply(Runner.scala:1340)
        at org.scalatest.tools.Runner$$anonfun$doRunRunRunDaDoRunRun$1.apply(Runner.scala:1334)
        at scala.collection.immutable.List.foreach(List.scala:392)
        at org.scalatest.tools.Runner$.doRunRunRunDaDoRunRun(Runner.scala:1334)
        at org.scalatest.tools.Runner$$anonfun$runOptionallyWithPassFailReporter$2.apply(Runner.scala:1011)
        at org.scalatest.tools.Runner$$anonfun$runOptionallyWithPassFailReporter$2.apply(Runner.scala:1010)
        at org.scalatest.tools.Runner$.withClassLoaderAndDispatchReporter(Runner.scala:1500)
        at org.scalatest.tools.Runner$.runOptionallyWithPassFailReporter(Runner.scala:1010)
        at org.scalatest.tools.Runner$.run(Runner.scala:850)
        at org.scalatest.tools.Runner.run(Runner.scala)
        at org.jetbrains.plugins.scala.testingSupport.scalaTest.ScalaTestRunner.runScalaTest2(ScalaTestRunner.java:131)
        at org.jetbrains.plugins.scala.testingSupport.scalaTest.ScalaTestRunner.main(ScalaTestRunner.java:28)
{code}
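
One way to see which rules keep rewriting the plan is to raise the Catalyst rule executor's log level to TRACE before running the query: RuleExecutor logs every plan-changing rule application together with a before/after diff of the plan. A minimal sketch, assuming the log4j 1.x logging bundled with Spark 2.3:
{code}
import org.apache.log4j.{Level, Logger}

// RuleExecutor.execute logs each effective rule application at TRACE, so the
// plans the failing batch oscillates between appear in the driver log.
Logger.getLogger("org.apache.spark.sql.catalyst.rules").setLevel(Level.TRACE)
{code}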

  was:
{code}
  test("optimization infinite loop") {
    withTempDir { dir =>
      val path = dir.getCanonicalPath
      import testImplicits._
      Seq((3, "a")).toDF("id", "value")
        .write.format("parquet").mode("overwrite").save(path)
      import org.apache.spark.sql.functions.udf
      val filterIt = udf((value: Int) => value > 0).asNondeterministic
      spark.read.load(path)
        .where(filterIt('id))
        .where('id < 0)
        .select('id)
        .collect
    }
  }
{code}


> ColumnPruning rule fails to remove extra Project
> ------------------------------------------------
>
>                 Key: SPARK-24696
>                 URL: https://issues.apache.org/jira/browse/SPARK-24696
>             Project: Spark
>          Issue Type: Improvement
>          Components: SQL
>    Affects Versions: 2.3.1
>            Reporter: Xiao Li
>            Assignee: Maryann Xue
>            Priority: Major
>


