This is an automated email from the ASF dual-hosted git repository.

srowen pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/spark.git


The following commit(s) were added to refs/heads/master by this push:
     new 144b35f  [SPARK-27320][SQL] Replacing index with iterator to traverse 
the expressions list in AggregationIterator, which makes it simpler
144b35f is described below

commit 144b35fe3a87b8391c4fbba544304a61fefda6a1
Author: 10129659 <chen.yans...@zte.com.cn>
AuthorDate: Sat Mar 30 02:27:12 2019 -0500

    [SPARK-27320][SQL] Replacing index with iterator to traverse the 
expressions list in AggregationIterator, which makes it simpler
    
    ## What changes were proposed in this pull request?
    In AggregationIterator's loop function, we access the expressions by 
index via `expressions(i)`; the type of `expressions` is `::`, a subtype of list.
    
    ```
    while (i < expressionsLength) {
          val func = expressions(i).aggregateFunction
    ```
    
    This PR replaces the index with an iterator to access the expressions list, 
which makes it simpler.
    
    ## How was this patch tested?
    Existing tests.
    
    Closes #24238 from eatoncys/array.
    
    Authored-by: 10129659 <chen.yans...@zte.com.cn>
    Signed-off-by: Sean Owen <sean.o...@databricks.com>
---
 .../sql/execution/aggregate/AggregationIterator.scala     | 15 +++++++--------
 1 file changed, 7 insertions(+), 8 deletions(-)

diff --git 
a/sql/core/src/main/scala/org/apache/spark/sql/execution/aggregate/AggregationIterator.scala
 
b/sql/core/src/main/scala/org/apache/spark/sql/execution/aggregate/AggregationIterator.scala
index a1fb23d..d03de15 100644
--- 
a/sql/core/src/main/scala/org/apache/spark/sql/execution/aggregate/AggregationIterator.scala
+++ 
b/sql/core/src/main/scala/org/apache/spark/sql/execution/aggregate/AggregationIterator.scala
@@ -74,13 +74,12 @@ abstract class AggregationIterator(
       startingInputBufferOffset: Int): Array[AggregateFunction] = {
     var mutableBufferOffset = 0
     var inputBufferOffset: Int = startingInputBufferOffset
-    val expressionsLength = expressions.length
-    val functions = new Array[AggregateFunction](expressionsLength)
+    val functions = new Array[AggregateFunction](expressions.length)
     var i = 0
     val inputAttributeSeq: AttributeSeq = inputAttributes
-    while (i < expressionsLength) {
-      val func = expressions(i).aggregateFunction
-      val funcWithBoundReferences: AggregateFunction = expressions(i).mode 
match {
+    for (expression <- expressions) {
+      val func = expression.aggregateFunction
+      val funcWithBoundReferences: AggregateFunction = expression.mode match {
         case Partial | Complete if func.isInstanceOf[ImperativeAggregate] =>
           // We need to create BoundReferences if the function is not an
           // expression-based aggregate function (it does not support 
code-gen) and the mode of
@@ -158,9 +157,9 @@ abstract class AggregationIterator(
       inputAttributes: Seq[Attribute]): (InternalRow, InternalRow) => Unit = {
     val joinedRow = new JoinedRow
     if (expressions.nonEmpty) {
-      val mergeExpressions = functions.zipWithIndex.flatMap {
-        case (ae: DeclarativeAggregate, i) =>
-          expressions(i).mode match {
+      val mergeExpressions = functions.zip(expressions).flatMap {
+        case (ae: DeclarativeAggregate, expression) =>
+          expression.mode match {
             case Partial | Complete => ae.updateExpressions
             case PartialMerge | Final => ae.mergeExpressions
           }


---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscr...@spark.apache.org
For additional commands, e-mail: commits-h...@spark.apache.org

Reply via email to