This is an automated email from the ASF dual-hosted git repository.

dongjoon pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/spark.git


The following commit(s) were added to refs/heads/master by this push:
     new 936c1cd22b8 [SPARK-45737][SQL] Remove unnecessary `.toArray[InternalRow]` in `SparkPlan#executeTake`
936c1cd22b8 is described below

commit 936c1cd22b8c8a3b6c2050f3cfc37bce5807ba28
Author: yangjie01 <yangji...@baidu.com>
AuthorDate: Tue Oct 31 08:56:21 2023 -0700

    [SPARK-45737][SQL] Remove unnecessary `.toArray[InternalRow]` in `SparkPlan#executeTake`
    
    ### What changes were proposed in this pull request?
    
https://github.com/apache/spark/blob/8dd3ec87e26969df6fe08f5fddc3f8d6efc2420d/sql/core/src/main/scala/org/apache/spark/sql/execution/SparkPlan.scala#L535-L559
    
    In the above code, both `mutable.Buffer#prependAll` and `mutable.Growable#++=` take an `IterableOnce` parameter:
    
    - `mutable.Buffer#prependAll`
    
    ```scala
      def prependAll(elems: IterableOnce[A]): this.type = { insertAll(0, elems); this }
    ```
    
    - `mutable.Growable#++=`
    
    ```scala
      @`inline` final def ++= (xs: IterableOnce[A]): this.type = addAll(xs)
    ```
    
    and the type of `rows` is `Iterator[InternalRow]`, which inherits from `IterableOnce`:
    
    ```scala
    val rows = decodeUnsafeRows(res(i)._2)
    private def decodeUnsafeRows(bytes: ChunkedByteBuffer): Iterator[InternalRow]
    ```
    
    So there is no need to convert the iterator into an `Array[InternalRow]` anymore.
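
    As a minimal standalone sketch (the object name and the plain `Int` values are illustrative, not from the patch), an `Iterator` can be passed to both methods directly:

    ```scala
    import scala.collection.mutable

    object IterableOnceDemo {
      def main(args: Array[String]): Unit = {
        val buf = mutable.ArrayBuffer(4, 5)

        // An Iterator is an IterableOnce, so no .toArray conversion is needed.
        buf.prependAll(Iterator(1, 2, 3)) // buf is now ArrayBuffer(1, 2, 3, 4, 5)
        buf ++= Iterator(6, 7)            // buf is now ArrayBuffer(1, 2, 3, 4, 5, 6, 7)

        println(buf)
      }
    }
    ```

    Dropping the conversion also avoids materializing one intermediate array copy of each decoded batch of rows.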
    
    ### Why are the changes needed?
    Remove unnecessary `.toArray[InternalRow]`
    
    ### Does this PR introduce _any_ user-facing change?
    No
    
    ### How was this patch tested?
    Pass GitHub Actions
    
    ### Was this patch authored or co-authored using generative AI tooling?
    No
    
    Closes #43599 from LuciferYang/sparkplan.
    
    Authored-by: yangjie01 <yangji...@baidu.com>
    Signed-off-by: Dongjoon Hyun <dh...@apple.com>
---
 .../src/main/scala/org/apache/spark/sql/execution/SparkPlan.scala | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/SparkPlan.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/SparkPlan.scala
index d93a83dec44..c65d1931dd1 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/SparkPlan.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/SparkPlan.scala
@@ -536,13 +536,13 @@ abstract class SparkPlan extends QueryPlan[SparkPlan] with Logging with Serializ
         while (buf.length < n && i < res.length) {
           val rows = decodeUnsafeRows(res(i)._2)
           if (n - buf.length >= res(i)._1) {
-            buf.prependAll(rows.toArray[InternalRow])
+            buf.prependAll(rows)
           } else {
             val dropUntil = res(i)._1 - (n - buf.length)
             // Same as Iterator.drop but this only takes a long.
             var j: Long = 0L
             while (j < dropUntil) { rows.next(); j += 1L}
-            buf.prependAll(rows.toArray[InternalRow])
+            buf.prependAll(rows)
           }
           i += 1
         }
@@ -550,9 +550,9 @@ abstract class SparkPlan extends QueryPlan[SparkPlan] with Logging with Serializ
         while (buf.length < n && i < res.length) {
           val rows = decodeUnsafeRows(res(i)._2)
           if (n - buf.length >= res(i)._1) {
-            buf ++= rows.toArray[InternalRow]
+            buf ++= rows
           } else {
-            buf ++= rows.take(n - buf.length).toArray[InternalRow]
+            buf ++= rows.take(n - buf.length)
           }
           i += 1
         }


---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscr...@spark.apache.org
For additional commands, e-mail: commits-h...@spark.apache.org
