This is an automated email from the ASF dual-hosted git repository.

yangjie01 pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/spark.git


The following commit(s) were added to refs/heads/master by this push:
     new ec875c521fee [SPARK-45688][SPARK-45693][CORE] Clean up the deprecated API usage related to `MapOps` & Fix `method += in trait Growable is deprecated`
ec875c521fee is described below

commit ec875c521feed18f72200a8f87a2be5d9e3ccf96
Author: panbingkun <pbk1...@gmail.com>
AuthorDate: Fri Nov 3 23:27:38 2023 +0800

    [SPARK-45688][SPARK-45693][CORE] Clean up the deprecated API usage related to `MapOps` & Fix `method += in trait Growable is deprecated`
    
    ### What changes were proposed in this pull request?
    This PR aims to:
    - clean up the deprecated API usage related to `MapOps`.
    - fix `method += in trait Growable is deprecated` (both patterns are illustrated in the sketch below).
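    
    For context, a minimal sketch (not part of this patch; `buf` and `env` are placeholder names) of the two deprecated patterns and their replacements under Scala 2.13:
    ```scala
    import scala.collection.mutable

    object DeprecationSketch {
      def main(args: Array[String]): Unit = {
        // Growable: the varargs overload `+=(elem1, elem2, elems*)` is deprecated in 2.13.
        val buf = mutable.ArrayBuffer[String]()
        // buf += ("--arg", "value")           // deprecated varargs form
        buf += "--arg" += "value"              // `+=` returns this, so single additions can be chained

        // MapOps: `+`, `-` and `--` on the generic scala.collection.Map are deprecated in 2.13.
        val env: collection.Map[String, String] = mutable.Map("A" -> "1")
        // env + ("K" -> "V")                  // deprecated on the generic Map
        val updated = env ++ Map("K" -> "V")   // concatenate instead, or convert with .toMap first
        println(s"${buf.mkString(" ")} $updated")
      }
    }
    ```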
    
    ### Why are the changes needed?
    Eliminate compiler warnings and stop using deprecated Scala APIs.
    
    ### Does this PR introduce _any_ user-facing change?
    No.
    
    ### How was this patch tested?
    - Pass GA.
    - Manually test:
       ```
       build/sbt -Phadoop-3 -Pdocker-integration-tests -Pspark-ganglia-lgpl -Pkinesis-asl -Pkubernetes -Phive-thriftserver -Pconnect -Pyarn -Phive -Phadoop-cloud -Pvolcano -Pkubernetes-integration-tests Test/package streaming-kinesis-asl-assembly/assembly connect/assembly
       ```
    
    ### Was this patch authored or co-authored using generative AI tooling?
    No.
    
    Closes #43578 from panbingkun/SPARK-45688.
    
    Authored-by: panbingkun <pbk1...@gmail.com>
    Signed-off-by: yangjie01 <yangji...@baidu.com>
---
 .../org/apache/spark/deploy/SparkSubmit.scala      | 28 +++++++++++-----------
 .../apache/spark/deploy/worker/CommandUtils.scala  |  5 ++--
 .../org/apache/spark/util/JsonProtocolSuite.scala  |  2 +-
 .../cluster/YarnClientSchedulerBackend.scala       |  2 +-
 .../logical/statsEstimation/JoinEstimation.scala   |  7 +++---
 .../datasources/v2/DescribeNamespaceExec.scala     |  4 ++--
 .../datasources/v2/V2SessionCatalogSuite.scala     |  2 +-
 7 files changed, 25 insertions(+), 25 deletions(-)

diff --git a/core/src/main/scala/org/apache/spark/deploy/SparkSubmit.scala b/core/src/main/scala/org/apache/spark/deploy/SparkSubmit.scala
index e60be5d5a651..30b542eefb60 100644
--- a/core/src/main/scala/org/apache/spark/deploy/SparkSubmit.scala
+++ b/core/src/main/scala/org/apache/spark/deploy/SparkSubmit.scala
@@ -715,7 +715,7 @@ private[spark] class SparkSubmit extends Logging {
       if (opt.value != null &&
           (deployMode & opt.deployMode) != 0 &&
           (clusterManager & opt.clusterManager) != 0) {
-        if (opt.clOption != null) { childArgs += (opt.clOption, opt.value) }
+        if (opt.clOption != null) { childArgs += opt.clOption += opt.value }
         if (opt.confKey != null) {
           if (opt.mergeFn.isDefined && sparkConf.contains(opt.confKey)) {
            sparkConf.set(opt.confKey, opt.mergeFn.get.apply(sparkConf.get(opt.confKey), opt.value))
@@ -747,15 +747,15 @@ private[spark] class SparkSubmit extends Logging {
     if (args.isStandaloneCluster) {
       if (args.useRest) {
         childMainClass = REST_CLUSTER_SUBMIT_CLASS
-        childArgs += (args.primaryResource, args.mainClass)
+        childArgs += args.primaryResource += args.mainClass
       } else {
        // In legacy standalone cluster mode, use Client as a wrapper around the user class
         childMainClass = STANDALONE_CLUSTER_SUBMIT_CLASS
         if (args.supervise) { childArgs += "--supervise" }
-        Option(args.driverMemory).foreach { m => childArgs += ("--memory", m) }
-        Option(args.driverCores).foreach { c => childArgs += ("--cores", c) }
+        Option(args.driverMemory).foreach { m => childArgs += "--memory" += m }
+        Option(args.driverCores).foreach { c => childArgs += "--cores" += c }
         childArgs += "launch"
-        childArgs += (args.master, args.primaryResource, args.mainClass)
+        childArgs += args.master += args.primaryResource += args.mainClass
       }
       if (args.childArgs != null) {
         childArgs ++= args.childArgs
@@ -777,20 +777,20 @@ private[spark] class SparkSubmit extends Logging {
     if (isYarnCluster) {
       childMainClass = YARN_CLUSTER_SUBMIT_CLASS
       if (args.isPython) {
-        childArgs += ("--primary-py-file", args.primaryResource)
-        childArgs += ("--class", "org.apache.spark.deploy.PythonRunner")
+        childArgs += "--primary-py-file" += args.primaryResource
+        childArgs += "--class" += "org.apache.spark.deploy.PythonRunner"
       } else if (args.isR) {
         val mainFile = new Path(args.primaryResource).getName
-        childArgs += ("--primary-r-file", mainFile)
-        childArgs += ("--class", "org.apache.spark.deploy.RRunner")
+        childArgs += "--primary-r-file" += mainFile
+        childArgs += "--class" += "org.apache.spark.deploy.RRunner"
       } else {
         if (args.primaryResource != SparkLauncher.NO_RESOURCE) {
-          childArgs += ("--jar", args.primaryResource)
+          childArgs += "--jar" += args.primaryResource
         }
-        childArgs += ("--class", args.mainClass)
+        childArgs += "--class" += args.mainClass
       }
       if (args.childArgs != null) {
-        args.childArgs.foreach { arg => childArgs += ("--arg", arg) }
+        args.childArgs.foreach { arg => childArgs += "--arg" += arg }
       }
     }
 
@@ -813,12 +813,12 @@ private[spark] class SparkSubmit extends Logging {
       }
       if (args.childArgs != null) {
         args.childArgs.foreach { arg =>
-          childArgs += ("--arg", arg)
+          childArgs += "--arg" += arg
         }
       }
      // Pass the proxyUser to the k8s app so it is possible to add it to the driver args
       if (args.proxyUser != null) {
-        childArgs += ("--proxy-user", args.proxyUser)
+        childArgs += "--proxy-user" += args.proxyUser
       }
     }
 
diff --git a/core/src/main/scala/org/apache/spark/deploy/worker/CommandUtils.scala b/core/src/main/scala/org/apache/spark/deploy/worker/CommandUtils.scala
index c04214de4ddc..d1190ca46c2a 100644
--- a/core/src/main/scala/org/apache/spark/deploy/worker/CommandUtils.scala
+++ b/core/src/main/scala/org/apache/spark/deploy/worker/CommandUtils.scala
@@ -81,14 +81,15 @@ object CommandUtils extends Logging {
 
     var newEnvironment = if (libraryPathEntries.nonEmpty && libraryPathName.nonEmpty) {
       val libraryPaths = libraryPathEntries ++ cmdLibraryPath ++ env.get(libraryPathName)
-      command.environment + ((libraryPathName, libraryPaths.mkString(File.pathSeparator)))
+      command.environment ++ Map(libraryPathName -> libraryPaths.mkString(File.pathSeparator))
     } else {
       command.environment
     }
 
     // set auth secret to env variable if needed
     if (securityMgr.isAuthenticationEnabled()) {
-      newEnvironment += (SecurityManager.ENV_AUTH_SECRET -> securityMgr.getSecretKey())
+      newEnvironment = newEnvironment ++
+        Map(SecurityManager.ENV_AUTH_SECRET -> securityMgr.getSecretKey())
     }
     // set SSL env variables if needed
     newEnvironment ++= securityMgr.getEnvironmentForSslRpcPasswords
diff --git a/core/src/test/scala/org/apache/spark/util/JsonProtocolSuite.scala b/core/src/test/scala/org/apache/spark/util/JsonProtocolSuite.scala
index 3defd4b1a7d9..948bc8889bcd 100644
--- a/core/src/test/scala/org/apache/spark/util/JsonProtocolSuite.scala
+++ b/core/src/test/scala/org/apache/spark/util/JsonProtocolSuite.scala
@@ -626,7 +626,7 @@ class JsonProtocolSuite extends SparkFunSuite {
     val expectedEvent: SparkListenerEnvironmentUpdate = {
      val e = JsonProtocol.environmentUpdateFromJson(environmentUpdateJsonString)
       e.copy(environmentDetails =
-        e.environmentDetails + ("Metrics Properties" -> Seq.empty[(String, String)]))
+        e.environmentDetails ++ Map("Metrics Properties" -> Seq.empty[(String, String)]))
     }
     val oldEnvironmentUpdateJson = environmentUpdateJsonString
       .removeField("Metrics Properties")
diff --git a/resource-managers/yarn/src/main/scala/org/apache/spark/scheduler/cluster/YarnClientSchedulerBackend.scala b/resource-managers/yarn/src/main/scala/org/apache/spark/scheduler/cluster/YarnClientSchedulerBackend.scala
index 717c620f5c34..af41d30c2cdb 100644
--- a/resource-managers/yarn/src/main/scala/org/apache/spark/scheduler/cluster/YarnClientSchedulerBackend.scala
+++ b/resource-managers/yarn/src/main/scala/org/apache/spark/scheduler/cluster/YarnClientSchedulerBackend.scala
@@ -53,7 +53,7 @@ private[spark] class YarnClientSchedulerBackend(
     sc.ui.foreach { ui => conf.set(DRIVER_APP_UI_ADDRESS, ui.webUrl) }
 
     val argsArrayBuf = new ArrayBuffer[String]()
-    argsArrayBuf += ("--arg", hostport)
+    argsArrayBuf += "--arg" += hostport
 
     logDebug("ClientArguments called with: " + argsArrayBuf.mkString(" "))
     val args = new ClientArguments(argsArrayBuf.toArray)
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/plans/logical/statsEstimation/JoinEstimation.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/plans/logical/statsEstimation/JoinEstimation.scala
index c6e76df1b31a..10646130a910 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/plans/logical/statsEstimation/JoinEstimation.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/plans/logical/statsEstimation/JoinEstimation.scala
@@ -206,13 +206,12 @@ case class JoinEstimation(join: Join) extends Logging {
           case _ =>
             computeByNdv(leftKey, rightKey, newMin, newMax)
         }
-        keyStatsAfterJoin += (
+        keyStatsAfterJoin +=
+          // Histograms are propagated as unchanged. During future estimation, they should be
+          // truncated by the updated max/min. In this way, only pointers of the histograms are
           // propagated and thus reduce memory consumption.
-          leftKey -> joinStat.copy(histogram = leftKeyStat.histogram),
-          rightKey -> joinStat.copy(histogram = rightKeyStat.histogram)
-        )
+          (leftKey -> joinStat.copy(histogram = leftKeyStat.histogram)) +=
+            (rightKey -> joinStat.copy(histogram = rightKeyStat.histogram))
         // Return cardinality estimated from the most selective join keys.
         if (card < joinCard) joinCard = card
       } else {
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/v2/DescribeNamespaceExec.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/v2/DescribeNamespaceExec.scala
index 125952566d7e..d97ffb694060 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/v2/DescribeNamespaceExec.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/v2/DescribeNamespaceExec.scala
@@ -46,12 +46,12 @@ case class DescribeNamespaceExec(
     }
 
     if (isExtended) {
-      val properties = metadata.asScala -- CatalogV2Util.NAMESPACE_RESERVED_PROPERTIES
+      val properties = metadata.asScala.toMap -- CatalogV2Util.NAMESPACE_RESERVED_PROPERTIES
       val propertiesStr =
         if (properties.isEmpty) {
           ""
         } else {
-          conf.redactOptions(properties.toMap).toSeq.sortBy(_._1).mkString("(", ", ", ")")
+          conf.redactOptions(properties).toSeq.sortBy(_._1).mkString("(", ", ", ")")
         }
       rows += toCatalystRow("Properties", propertiesStr)
     }
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/v2/V2SessionCatalogSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/v2/V2SessionCatalogSuite.scala
index c43658eacabc..f9da55ed6ba3 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/v2/V2SessionCatalogSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/v2/V2SessionCatalogSuite.scala
@@ -827,7 +827,7 @@ class V2SessionCatalogNamespaceSuite extends V2SessionCatalogBaseSuite {
     // remove location and comment that are automatically added by HMS unless they are expected
     val toRemove =
       CatalogV2Util.NAMESPACE_RESERVED_PROPERTIES.filter(expected.contains)
-    assert(expected -- toRemove === actual)
+    assert(expected.toMap -- toRemove === actual)
   }
 
   test("listNamespaces: basic behavior") {


---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscr...@spark.apache.org
For additional commands, e-mail: commits-h...@spark.apache.org
