This is an automated email from the ASF dual-hosted git repository.

yangjie01 pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/spark.git


The following commit(s) were added to refs/heads/master by this push:
     new 6835b30eb21 [SPARK-44351][SQL] Make some syntactic simplification
6835b30eb21 is described below

commit 6835b30eb21de74de4aed042f2a32f3c86d6029b
Author: yangjie01 <yangji...@baidu.com>
AuthorDate: Mon Jul 10 23:05:00 2023 +0800

    [SPARK-44351][SQL] Make some syntactic simplification
    
    ### What changes were proposed in this pull request?
    This PR aims to make some syntactic simplifications (see the sketch after the list):
    
    - Use `exists` instead of `find` followed by an emptiness check
    - Use `orNull` instead of `getOrElse(null)`
    - Use `getOrElse(key, value)` instead of `get(key).getOrElse(value)` on a map
    - Use `find` instead of `filter` + `headOption`
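
    A minimal sketch of the four rewrites on plain Scala collections, for illustration only (the values `items`, `maybeName`, and `settings` are hypothetical, not taken from the patch):

    ```scala
    // exists instead of find(...).nonEmpty / find(...).isEmpty
    val items = Seq("apple", "banana", "cherry")
    val hasBanana = items.exists(_ == "banana")     // was: items.find(_ == "banana").nonEmpty

    // orNull instead of map(...).getOrElse(null)
    val maybeName: Option[String] = Some(" spark ")
    val nameOrNull: String = maybeName.map(_.trim).orNull

    // getOrElse(key, default) instead of get(key).getOrElse(default) on a Map
    val settings = Map("mode" -> "strict")
    val mode = settings.getOrElse("mode", "lenient")

    // find(p) instead of filter(p).headOption
    val firstWithB = items.find(_.startsWith("b"))
    ```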
    
    ### Why are the changes needed?
    Code simplification.
    
    ### Does this PR introduce _any_ user-facing change?
    No
    
    ### How was this patch tested?
    - Pass GitHub Actions
    
    Closes #41915 from LuciferYang/syntactic-simplification.
    
    Lead-authored-by: yangjie01 <yangji...@baidu.com>
    Co-authored-by: YangJie <yangji...@baidu.com>
    Signed-off-by: yangjie01 <yangji...@baidu.com>
---
 .../scala/org/apache/spark/sql/catalyst/analysis/Analyzer.scala     | 2 +-
 .../scala/org/apache/spark/sql/catalyst/analysis/ResolveUnion.scala | 2 +-
 .../scala/org/apache/spark/sql/catalyst/parser/AstBuilder.scala     | 2 +-
 .../org/apache/spark/sql/execution/datasources/FileFormat.scala     | 6 +++---
 .../org/apache/spark/sql/execution/datasources/jdbc/JdbcUtils.scala | 3 +--
 .../execution/datasources/jdbc/connection/ConnectionProvider.scala  | 2 +-
 6 files changed, 8 insertions(+), 9 deletions(-)

diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/Analyzer.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/Analyzer.scala
index 94d341ed1d7..bea7fe46c7c 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/Analyzer.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/Analyzer.scala
@@ -1366,7 +1366,7 @@ class Analyzer(override val catalogManager: CatalogManager) extends RuleExecutor
               // column names. We need to make sure the static partition column name doesn't appear
               // there to catch the following ambiguous query:
               // INSERT OVERWRITE t PARTITION (c='1') (c) VALUES ('2')
-              if (query.output.find(col => conf.resolver(col.name, staticName)).nonEmpty) {
+              if (query.output.exists(col => conf.resolver(col.name, staticName))) {
                 throw QueryCompilationErrors.staticPartitionInUserSpecifiedColumnsError(staticName)
               }
             }
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/ResolveUnion.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/ResolveUnion.scala
index bdffce0b9af..4aa5c3ebf5a 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/ResolveUnion.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/ResolveUnion.scala
@@ -84,7 +84,7 @@ object ResolveUnion extends Rule[LogicalPlan] {
     }
 
     colType.fields
-      .filter(f => targetType.fields.find(tf => resolver(f.name, tf.name)).isEmpty)
+      .filter(f => !targetType.fields.exists(tf => resolver(f.name, tf.name)))
       .foreach { f =>
         newStructFields ++= Literal(f.name) :: ExtractValue(col, Literal(f.name), resolver) :: Nil
       }
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/parser/AstBuilder.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/parser/AstBuilder.scala
index 99fa0bf9809..0eeef48b071 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/parser/AstBuilder.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/parser/AstBuilder.scala
@@ -3258,7 +3258,7 @@ class AstBuilder extends DataTypeAstBuilder with SQLConfHelper with Logging {
       ctx: ExpressionPropertyListContext): OptionList = {
     val options = ctx.expressionProperty.asScala.map { property =>
       val key: String = visitPropertyKey(property.key)
-      val value: Expression = Option(property.value).map(expression).getOrElse(null)
+      val value: Expression = Option(property.value).map(expression).orNull
       key -> value
     }.toSeq
     OptionList(options)
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/FileFormat.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/FileFormat.scala
index f76ea30e04c..2e71c829115 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/FileFormat.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/FileFormat.scala
@@ -292,9 +292,9 @@ object FileFormat {
       name: String,
       file: PartitionedFile,
       metadataExtractors: Map[String, PartitionedFile => Any]): Literal = {
-    val extractor = metadataExtractors.get(name).getOrElse {
-      pf: PartitionedFile => pf.otherConstantMetadataColumnValues.get(name).orNull
-    }
+    val extractor = metadataExtractors.getOrElse(name,
+      { pf: PartitionedFile => pf.otherConstantMetadataColumnValues.get(name).orNull }
+    )
     Literal(extractor.apply(file))
   }
 
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/jdbc/JdbcUtils.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/jdbc/JdbcUtils.scala
index d907ce6b100..3335f21a0d3 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/jdbc/JdbcUtils.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/jdbc/JdbcUtils.scala
@@ -222,8 +222,7 @@ object JdbcUtils extends Logging with SQLConfHelper {
       // including java.sql.Types.ARRAY,DATALINK,DISTINCT,JAVA_OBJECT,NULL,OTHER,REF_CURSOR,
       // TIME_WITH_TIMEZONE,TIMESTAMP_WITH_TIMEZONE, and among others.
       val jdbcType = classOf[JDBCType].getEnumConstants()
-        .filter(_.getVendorTypeNumber == sqlType)
-        .headOption
+        .find(_.getVendorTypeNumber == sqlType)
         .map(_.getName)
         .getOrElse(sqlType.toString)
       throw QueryExecutionErrors.unrecognizedSqlTypeError(jdbcType, typeName)
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/jdbc/connection/ConnectionProvider.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/jdbc/connection/ConnectionProvider.scala
index 0d8c80c9fc1..7342f701e34 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/jdbc/connection/ConnectionProvider.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/jdbc/connection/ConnectionProvider.scala
@@ -70,7 +70,7 @@ protected abstract class ConnectionProviderBase extends Logging {
     val selectedProvider = connectionProviderName match {
       case Some(providerName) =>
         // It is assumed that no two providers will have the same name
-        filteredProviders.filter(_.name == providerName).headOption.getOrElse {
+        filteredProviders.find(_.name == providerName).getOrElse {
           throw new IllegalArgumentException(
             s"Could not find a JDBC connection provider with name '$providerName' " +
             "that can handle the specified driver and options. " +


---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscr...@spark.apache.org
For additional commands, e-mail: commits-h...@spark.apache.org
