This is an automated email from the ASF dual-hosted git repository.

ruifengz pushed a commit to branch branch-3.5
in repository https://gitbox.apache.org/repos/asf/spark.git


The following commit(s) were added to refs/heads/branch-3.5 by this push:
     new 53e2e7bdd618 [SPARK-46189][PS][SQL] Perform comparisons and arithmetic 
between same types in various Pandas aggregate functions to avoid interpreted 
mode errors
53e2e7bdd618 is described below

commit 53e2e7bdd618e2a7dec5a84b9d5ae965fb136179
Author: Bruce Robbins <bersprock...@gmail.com>
AuthorDate: Fri Dec 1 10:28:33 2023 +0800

    [SPARK-46189][PS][SQL] Perform comparisons and arithmetic between same 
types in various Pandas aggregate functions to avoid interpreted mode errors
    
    ### What changes were proposed in this pull request?
    
    In various Pandas aggregate functions, remove each comparison or arithmetic 
operation between `DoubleType` and `IntegerType` in `evaluateExpression` and 
replace with a comparison or arithmetic operation between `DoubleType` and 
`DoubleType`.
    
    Affected functions are `PandasStddev`, `PandasVariance`, `PandasSkewness`, 
`PandasKurtosis`, and `PandasCovar`.
    
    ### Why are the changes needed?
    
    These functions fail in interpreted mode. For example, `evaluateExpression` 
in `PandasKurtosis` compares a double to an integer:
    ```
    If(n < 4, Literal.create(null, DoubleType) ...
    ```
    This results in a boxed double and a boxed integer getting passed to 
`SQLOrderingUtil.compareDoubles` which expects two doubles as arguments. The 
scala runtime tries to unbox the boxed integer as a double, resulting in an 
error.
    
    Reproduction example:
    ```
    spark.sql("set spark.sql.codegen.wholeStage=false")
    spark.sql("set spark.sql.codegen.factoryMode=NO_CODEGEN")
    
    import numpy as np
    import pandas as pd
    
    import pyspark.pandas as ps
    
    pser = pd.Series([1, 2, 3, 7, 9, 8], index=np.random.rand(6), name="a")
    psser = ps.from_pandas(pser)
    
    psser.kurt()
    ```
    See Jira (SPARK-46189) for the other reproduction cases.
    
    This works fine in codegen mode because the integer is already unboxed and 
the Java runtime will implicitly cast it to a double.
    
    ### Does this PR introduce _any_ user-facing change?
    
    No.
    
    ### How was this patch tested?
    
    New unit tests.
    
    ### Was this patch authored or co-authored using generative AI tooling?
    
    No.
    
    Closes #44099 from bersprockets/unboxing_error.
    
    Authored-by: Bruce Robbins <bersprock...@gmail.com>
    Signed-off-by: Ruifeng Zheng <ruife...@apache.org>
    (cherry picked from commit 042d8546be5d160e203ad78a8aa2e12e74142338)
    Signed-off-by: Ruifeng Zheng <ruife...@apache.org>
---
 .../expressions/aggregate/CentralMomentAgg.scala   | 16 ++---
 .../expressions/aggregate/Covariance.scala         |  2 +-
 .../aggregate/CentralMomentAggSuite.scala          | 77 ++++++++++++++++++++++
 .../expressions/aggregate/CovarianceAggSuite.scala | 39 +++++++++++
 .../aggregate/DeclarativeAggregateEvaluator.scala  | 10 +--
 .../aggregate/TestWithAndWithoutCodegen.scala      | 35 ++++++++++
 6 files changed, 165 insertions(+), 14 deletions(-)

diff --git 
a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/aggregate/CentralMomentAgg.scala
 
b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/aggregate/CentralMomentAgg.scala
index 133a39d98745..316cb9e0bbc3 100644
--- 
a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/aggregate/CentralMomentAgg.scala
+++ 
b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/aggregate/CentralMomentAgg.scala
@@ -353,7 +353,7 @@ case class PandasStddev(
 
   override val evaluateExpression: Expression = {
     If(n === 0.0, Literal.create(null, DoubleType),
-      If(n === ddof, divideByZeroEvalResult, sqrt(m2 / (n - ddof))))
+      If(n === ddof.toDouble, divideByZeroEvalResult, sqrt(m2 / (n - 
ddof.toDouble))))
   }
 
   override def prettyName: String = "pandas_stddev"
@@ -375,7 +375,7 @@ case class PandasVariance(
 
   override val evaluateExpression: Expression = {
     If(n === 0.0, Literal.create(null, DoubleType),
-      If(n === ddof, divideByZeroEvalResult, m2 / (n - ddof)))
+      If(n === ddof.toDouble, divideByZeroEvalResult, m2 / (n - 
ddof.toDouble)))
   }
 
   override def prettyName: String = "pandas_variance"
@@ -405,8 +405,8 @@ case class PandasSkewness(child: Expression)
     val _m2 = If(abs(m2) < 1e-14, Literal(0.0), m2)
     val _m3 = If(abs(m3) < 1e-14, Literal(0.0), m3)
 
-    If(n < 3, Literal.create(null, DoubleType),
-      If(_m2 === 0.0, Literal(0.0), sqrt(n - 1) * (n / (n - 2)) * _m3 / 
sqrt(_m2 * _m2 * _m2)))
+    If(n < 3.0, Literal.create(null, DoubleType),
+      If(_m2 === 0.0, Literal(0.0), sqrt(n - 1.0) * (n / (n - 2.0)) * _m3 / 
sqrt(_m2 * _m2 * _m2)))
   }
 
   override protected def withNewChildInternal(newChild: Expression): 
PandasSkewness =
@@ -423,9 +423,9 @@ case class PandasKurtosis(child: Expression)
   override protected def momentOrder = 4
 
   override val evaluateExpression: Expression = {
-    val adj = ((n - 1) / (n - 2)) * ((n - 1) / (n - 3)) * 3
-    val numerator = n * (n + 1) * (n - 1) * m4
-    val denominator = (n - 2) * (n - 3) * m2 * m2
+    val adj = ((n - 1.0) / (n - 2.0)) * ((n - 1.0) / (n - 3.0)) * 3.0
+    val numerator = n * (n + 1.0) * (n - 1.0) * m4
+    val denominator = (n - 2.0) * (n - 3.0) * m2 * m2
 
     // floating point error
     //
@@ -436,7 +436,7 @@ case class PandasKurtosis(child: Expression)
     val _numerator = If(abs(numerator) < 1e-14, Literal(0.0), numerator)
     val _denominator = If(abs(denominator) < 1e-14, Literal(0.0), denominator)
 
-    If(n < 4, Literal.create(null, DoubleType),
+    If(n < 4.0, Literal.create(null, DoubleType),
       If(_denominator === 0.0, Literal(0.0), _numerator / _denominator - adj))
   }
 
diff --git 
a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/aggregate/Covariance.scala
 
b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/aggregate/Covariance.scala
index ff31fb1128b9..b392b603ab8d 100644
--- 
a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/aggregate/Covariance.scala
+++ 
b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/aggregate/Covariance.scala
@@ -156,7 +156,7 @@ case class PandasCovar(
 
   override val evaluateExpression: Expression = {
     If(n === 0.0, Literal.create(null, DoubleType),
-      If(n === ddof, divideByZeroEvalResult, ck / (n - ddof)))
+      If(n === ddof.toDouble, divideByZeroEvalResult, ck / (n - 
ddof.toDouble)))
   }
   override def prettyName: String = "pandas_covar"
 
diff --git 
a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/expressions/aggregate/CentralMomentAggSuite.scala
 
b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/expressions/aggregate/CentralMomentAggSuite.scala
new file mode 100644
index 000000000000..daf3ede0d036
--- /dev/null
+++ 
b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/expressions/aggregate/CentralMomentAggSuite.scala
@@ -0,0 +1,77 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.spark.sql.catalyst.expressions.aggregate
+
+import org.apache.spark.sql.catalyst.InternalRow
+import org.apache.spark.sql.catalyst.expressions.AttributeReference
+import org.apache.spark.sql.types.DoubleType
+
+class CentralMomentAggSuite extends TestWithAndWithoutCodegen {
+  val input = AttributeReference("input", DoubleType, nullable = true)()
+
+  testBothCodegenAndInterpreted("SPARK-46189: pandas_kurtosis eval") {
+    val evaluator = DeclarativeAggregateEvaluator(PandasKurtosis(input), 
Seq(input))
+    val buffer = evaluator.update(
+      InternalRow(1.0d),
+      InternalRow(2.0d),
+      InternalRow(3.0d),
+      InternalRow(7.0d),
+      InternalRow(9.0d),
+      InternalRow(8.0d))
+    val result = evaluator.eval(buffer)
+    assert(result === InternalRow(-2.5772889417360285d))
+  }
+
+  testBothCodegenAndInterpreted("SPARK-46189: pandas_skew eval") {
+    val evaluator = DeclarativeAggregateEvaluator(PandasSkewness(input), 
Seq(input))
+    val buffer = evaluator.update(
+      InternalRow(1.0d),
+      InternalRow(2.0d),
+      InternalRow(2.0d),
+      InternalRow(2.0d),
+      InternalRow(2.0d),
+      InternalRow(100.0d))
+    val result = evaluator.eval(buffer)
+    assert(result === InternalRow(2.4489389171333733d))
+  }
+
+  testBothCodegenAndInterpreted("SPARK-46189: pandas_stddev eval") {
+    val evaluator = DeclarativeAggregateEvaluator(PandasStddev(input, 1), 
Seq(input))
+    val buffer = evaluator.update(
+      InternalRow(1.0d),
+      InternalRow(2.0d),
+      InternalRow(3.0d),
+      InternalRow(7.0d),
+      InternalRow(9.0d),
+      InternalRow(8.0d))
+    val result = evaluator.eval(buffer)
+    assert(result === InternalRow(3.40587727318528d))
+  }
+
+  testBothCodegenAndInterpreted("SPARK-46189: pandas_variance eval") {
+    val evaluator = DeclarativeAggregateEvaluator(PandasVariance(input, 1), 
Seq(input))
+    val buffer = evaluator.update(
+      InternalRow(1.0d),
+      InternalRow(2.0d),
+      InternalRow(3.0d),
+      InternalRow(7.0d),
+      InternalRow(9.0d),
+      InternalRow(8.0d))
+    val result = evaluator.eval(buffer)
+    assert(result === InternalRow(11.6d))
+  }
+}
diff --git 
a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/expressions/aggregate/CovarianceAggSuite.scala
 
b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/expressions/aggregate/CovarianceAggSuite.scala
new file mode 100644
index 000000000000..2df053184c2b
--- /dev/null
+++ 
b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/expressions/aggregate/CovarianceAggSuite.scala
@@ -0,0 +1,39 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.spark.sql.catalyst.expressions.aggregate
+
+import org.apache.spark.sql.catalyst.InternalRow
+import org.apache.spark.sql.catalyst.expressions.AttributeReference
+import org.apache.spark.sql.types.DoubleType
+
+class CovarianceAggSuite extends TestWithAndWithoutCodegen {
+  val a = AttributeReference("a", DoubleType, nullable = true)()
+  val b = AttributeReference("b", DoubleType, nullable = true)()
+
+  testBothCodegenAndInterpreted("SPARK-46189: pandas_covar eval") {
+    val evaluator = DeclarativeAggregateEvaluator(PandasCovar(a, b, 1), Seq(a, 
b))
+    val buffer = evaluator.update(
+      InternalRow(1.0d, 1.0d),
+      InternalRow(2.0d, 2.0d),
+      InternalRow(3.0d, 3.0d),
+      InternalRow(7.0d, 7.0d),
+      InternalRow(9.0, 9.0),
+      InternalRow(8.0d, 6.0))
+    val result = evaluator.eval(buffer)
+    assert(result === InternalRow(10.4d))
+  }
+}
diff --git 
a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/expressions/aggregate/DeclarativeAggregateEvaluator.scala
 
b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/expressions/aggregate/DeclarativeAggregateEvaluator.scala
index b0f55b3b5c44..ac80e1419a99 100644
--- 
a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/expressions/aggregate/DeclarativeAggregateEvaluator.scala
+++ 
b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/expressions/aggregate/DeclarativeAggregateEvaluator.scala
@@ -17,24 +17,24 @@
 package org.apache.spark.sql.catalyst.expressions.aggregate
 
 import org.apache.spark.sql.catalyst.InternalRow
-import org.apache.spark.sql.catalyst.expressions.{Attribute, JoinedRow, 
SafeProjection}
+import org.apache.spark.sql.catalyst.expressions.{Attribute, JoinedRow, 
MutableProjection}
 
 /**
  * Evaluator for a [[DeclarativeAggregate]].
  */
 case class DeclarativeAggregateEvaluator(function: DeclarativeAggregate, 
input: Seq[Attribute]) {
 
-  lazy val initializer = SafeProjection.create(function.initialValues)
+  lazy val initializer = MutableProjection.create(function.initialValues)
 
-  lazy val updater = SafeProjection.create(
+  lazy val updater = MutableProjection.create(
     function.updateExpressions,
     function.aggBufferAttributes ++ input)
 
-  lazy val merger = SafeProjection.create(
+  lazy val merger = MutableProjection.create(
     function.mergeExpressions,
     function.aggBufferAttributes ++ function.inputAggBufferAttributes)
 
-  lazy val evaluator = SafeProjection.create(
+  lazy val evaluator = MutableProjection.create(
     function.evaluateExpression :: Nil,
     function.aggBufferAttributes)
 
diff --git 
a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/expressions/aggregate/TestWithAndWithoutCodegen.scala
 
b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/expressions/aggregate/TestWithAndWithoutCodegen.scala
new file mode 100644
index 000000000000..b43b160146eb
--- /dev/null
+++ 
b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/expressions/aggregate/TestWithAndWithoutCodegen.scala
@@ -0,0 +1,35 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.spark.sql.catalyst.expressions.aggregate
+
+import org.apache.spark.SparkFunSuite
+import org.apache.spark.sql.catalyst.expressions.CodegenObjectFactoryMode
+import org.apache.spark.sql.catalyst.plans.SQLHelper
+import org.apache.spark.sql.internal.SQLConf
+
+trait TestWithAndWithoutCodegen extends SparkFunSuite with SQLHelper {
+  def testBothCodegenAndInterpreted(name: String)(f: => Unit): Unit = {
+    val modes = Seq(CodegenObjectFactoryMode.CODEGEN_ONLY, 
CodegenObjectFactoryMode.NO_CODEGEN)
+    for (fallbackMode <- modes) {
+      test(s"$name with $fallbackMode") {
+        withSQLConf(SQLConf.CODEGEN_FACTORY_MODE.key -> fallbackMode.toString) 
{
+          f
+        }
+      }
+    }
+  }
+}


---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscr...@spark.apache.org
For additional commands, e-mail: commits-h...@spark.apache.org

Reply via email to