This is an automated email from the ASF dual-hosted git repository.

srowen pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/spark.git


The following commit(s) were added to refs/heads/master by this push:
     new ad6cd60ca74 [SPARK-44024][SQL] Change to use `map` when `unzip` is only 
used to extract a single element
ad6cd60ca74 is described below

commit ad6cd60ca7408018d8c6259597456e9c2fe8b376
Author: yangjie01 <yangji...@baidu.com>
AuthorDate: Sun Jun 18 07:19:56 2023 -0500

    [SPARK-44024][SQL] Change to use `map` when `unzip` is only used to extract a 
single element
    
    ### What changes were proposed in this pull request?
    A minor code simplification: use `map` instead of `unzip` when `unzip` is only 
used to extract a single element.
    
    ### Why are the changes needed?
    Code simplification
    
    ### Does this PR introduce _any_ user-facing change?
    No
    
    ### How was this patch tested?
    Pass GitHub Actions
    
    Closes #41548 from LuciferYang/SPARK-44024.
    
    Lead-authored-by: yangjie01 <yangji...@baidu.com>
    Co-authored-by: YangJie <yangji...@baidu.com>
    Signed-off-by: Sean Owen <sro...@gmail.com>
---
 .../scala/org/apache/spark/sql/catalyst/optimizer/Optimizer.scala     | 2 +-
 .../apache/spark/sql/execution/datasources/v2/CreateIndexExec.scala   | 2 +-
 .../spark/sql/execution/datasources/v2/V2ScanRelationPushDown.scala   | 4 ++--
 3 files changed, 4 insertions(+), 4 deletions(-)

diff --git 
a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/optimizer/Optimizer.scala
 
b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/optimizer/Optimizer.scala
index 568e3d30e34..c70dba01808 100644
--- 
a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/optimizer/Optimizer.scala
+++ 
b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/optimizer/Optimizer.scala
@@ -861,7 +861,7 @@ object ColumnPruning extends Rule[LogicalPlan] {
       val newProjects = e.projections.map { proj =>
         proj.zip(e.output).filter { case (_, a) =>
           newOutput.contains(a)
-        }.unzip._1
+        }.map(_._1)
       }
       a.copy(child = Expand(newProjects, newOutput, grandChild))
 
diff --git 
a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/v2/CreateIndexExec.scala
 
b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/v2/CreateIndexExec.scala
index 20ccf991af6..8dac6737334 100644
--- 
a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/v2/CreateIndexExec.scala
+++ 
b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/v2/CreateIndexExec.scala
@@ -52,7 +52,7 @@ case class CreateIndexExec(
     }
     try {
       table.createIndex(
-        indexName, columns.unzip._1.toArray, colProperties, 
propertiesWithIndexType.asJava)
+        indexName, columns.map(_._1).toArray, colProperties, 
propertiesWithIndexType.asJava)
     } catch {
       case _: IndexAlreadyExistsException if ignoreIfExists =>
         logWarning(s"Index $indexName already exists in table ${table.name}. 
Ignoring.")
diff --git 
a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/v2/V2ScanRelationPushDown.scala
 
b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/v2/V2ScanRelationPushDown.scala
index 49a6c7232ec..e58fe7844ab 100644
--- 
a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/v2/V2ScanRelationPushDown.scala
+++ 
b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/v2/V2ScanRelationPushDown.scala
@@ -192,11 +192,11 @@ object V2ScanRelationPushDown extends Rule[LogicalPlan] 
with PredicateHelper {
       val groupOutputMap = normalizedGroupingExpr.zipWithIndex.map { case (e, 
i) =>
         AttributeReference(s"group_col_$i", e.dataType)() -> e
       }
-      val groupOutput = groupOutputMap.unzip._1
+      val groupOutput = groupOutputMap.map(_._1)
       val aggOutputMap = finalAggExprs.zipWithIndex.map { case (e, i) =>
         AttributeReference(s"agg_func_$i", e.dataType)() -> e
       }
-      val aggOutput = aggOutputMap.unzip._1
+      val aggOutput = aggOutputMap.map(_._1)
       val newOutput = groupOutput ++ aggOutput
       val groupByExprToOutputOrdinal = mutable.HashMap.empty[Expression, Int]
       normalizedGroupingExpr.zipWithIndex.foreach { case (expr, ordinal) =>


---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscr...@spark.apache.org
For additional commands, e-mail: commits-h...@spark.apache.org

Reply via email to