This is an automated email from the ASF dual-hosted git repository.

srowen pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/spark.git


The following commit(s) were added to refs/heads/master by this push:
     new 34d5272663c [SPARK-40607][CORE][SQL][MLLIB][SS] Remove redundant string interpolator operations
34d5272663c is described below

commit 34d5272663ce4852ca5b2daa665983a321b42060
Author: yangjie01 <yangji...@baidu.com>
AuthorDate: Wed Oct 5 18:05:12 2022 -0500

    [SPARK-40607][CORE][SQL][MLLIB][SS] Remove redundant string interpolator operations
    
    ### What changes were proposed in this pull request?
    This PR removes redundant string interpolator operations in Spark code; the change does not cover code related to logs, exceptions, or `configurations.doc`.
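    
    A minimal before/after sketch of the pattern being cleaned up, adapted from the H2Dialect hunk in this commit (the table/index values here are placeholders for illustration):
    
    ```scala
    // Placeholder values standing in for tableIdent.name() / indexName in the real code
    val tableName = "PEOPLE"
    val indexName = "PEOPLE_IDX"
    
    // Before: the s prefix on the first literal is redundant (it contains no ${...} splice)
    val before = s"SELECT * FROM INFORMATION_SCHEMA.INDEXES WHERE " +
      s"TABLE_NAME = '$tableName' AND INDEX_NAME = '$indexName'"
    
    // After: plain literal; the interpolator stays only where splicing actually happens
    val after = "SELECT * FROM INFORMATION_SCHEMA.INDEXES WHERE " +
      s"TABLE_NAME = '$tableName' AND INDEX_NAME = '$indexName'"
    
    assert(before == after) // behavior is identical; only the no-op call goes away
    ```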
    
    ### Why are the changes needed?
    Clean up unnecessary function calls
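    
    For context, a rough sketch of what the no-op call amounts to, assuming Scala 2's standard interpolator desugaring (before any compiler optimization):
    
    ```scala
    // An s-interpolated literal with no splices desugars to roughly:
    val viaInterpolator = StringContext("TaskCommitDenied").s()
    // i.e. a StringContext construction plus a method call that only
    // re-assembles the single constant part, versus the plain literal:
    val plain = "TaskCommitDenied"
    assert(viaInterpolator == plain)
    ```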
    
    ### Does this PR introduce _any_ user-facing change?
    No
    
    ### How was this patch tested?
    Pass GitHub Actions
    
    Closes #38043 from LuciferYang/unused-s.
    
    Authored-by: yangjie01 <yangji...@baidu.com>
    Signed-off-by: Sean Owen <sro...@gmail.com>
---
 core/src/main/scala/org/apache/spark/TaskEndReason.scala       |  2 +-
 core/src/main/scala/org/apache/spark/ui/jobs/StagePage.scala   | 10 +++++-----
 .../org/apache/spark/ml/param/shared/SharedParamsCodeGen.scala |  4 ++--
 .../scala/org/apache/spark/sql/catalyst/expressions/Cast.scala |  4 ++--
 .../spark/sql/catalyst/expressions/collectionOperations.scala  |  2 +-
 .../spark/sql/catalyst/expressions/datetimeExpressions.scala   |  2 +-
 .../org/apache/spark/sql/catalyst/expressions/literals.scala   |  2 +-
 .../sql/catalyst/optimizer/PullOutGroupingExpressions.scala    |  2 +-
 .../org/apache/spark/sql/catalyst/parser/AstBuilder.scala      |  2 +-
 .../main/scala/org/apache/spark/sql/execution/HiveResult.scala |  2 +-
 .../spark/sql/execution/aggregate/HashMapGenerator.scala       |  2 +-
 .../sql/execution/aggregate/RowBasedHashMapGenerator.scala     |  2 +-
 .../apache/spark/sql/execution/basicPhysicalOperators.scala    |  2 +-
 .../spark/sql/execution/joins/BroadcastHashJoinExec.scala      |  2 +-
 .../spark/sql/execution/streaming/ResolveWriteToStream.scala   |  2 +-
 .../src/main/scala/org/apache/spark/sql/jdbc/H2Dialect.scala   |  2 +-
 .../main/scala/org/apache/spark/sql/jdbc/PostgresDialect.scala |  2 +-
 17 files changed, 23 insertions(+), 23 deletions(-)

diff --git a/core/src/main/scala/org/apache/spark/TaskEndReason.scala b/core/src/main/scala/org/apache/spark/TaskEndReason.scala
index 5dc70e9834b..f1ce302a05d 100644
--- a/core/src/main/scala/org/apache/spark/TaskEndReason.scala
+++ b/core/src/main/scala/org/apache/spark/TaskEndReason.scala
@@ -242,7 +242,7 @@ case class TaskCommitDenied(
     jobID: Int,
     partitionID: Int,
     attemptNumber: Int) extends TaskFailedReason {
-  override def toErrorString: String = s"TaskCommitDenied (Driver denied task commit)" +
+  override def toErrorString: String = "TaskCommitDenied (Driver denied task commit)" +
     s" for job: $jobID, partition: $partitionID, attemptNumber: $attemptNumber"
   /**
   * If a task failed because its attempt to commit was denied, do not count this failure
diff --git a/core/src/main/scala/org/apache/spark/ui/jobs/StagePage.scala b/core/src/main/scala/org/apache/spark/ui/jobs/StagePage.scala
index 8106eec847e..1934e9e58e6 100644
--- a/core/src/main/scala/org/apache/spark/ui/jobs/StagePage.scala
+++ b/core/src/main/scala/org/apache/spark/ui/jobs/StagePage.scala
@@ -360,7 +360,7 @@ private[ui] class StagePage(parent: StagesTab, store: AppStatusStore) extends We
                |'content': '<div class="task-assignment-timeline-content"
                  |data-toggle="tooltip" data-placement="top"
                  |data-html="true" data-container="body"
-                 |data-title="${s"Task " + index + " (attempt " + attempt + ")"}<br>
+                 |data-title="${"Task " + index + " (attempt " + attempt + ")"}<br>
                  |Status: ${taskInfo.status}<br>
                  |Launch Time: ${UIUtils.formatDate(new Date(launchTime))}
                  |${
@@ -416,7 +416,7 @@ private[ui] class StagePage(parent: StagesTab, store: AppStatusStore) extends We
           <span>Enable zooming</span>
         </div>
         <div>
-          <form id={s"form-event-timeline-page"}
+          <form id={"form-event-timeline-page"}
                 method="get"
                 action=""
                 class="form-inline float-right justify-content-end"
@@ -426,13 +426,13 @@ private[ui] class StagePage(parent: StagesTab, store: AppStatusStore) extends We
             <input type="hidden" name="attempt" 
value={stageAttemptId.toString} />
             <input type="text"
                    name="task.eventTimelinePageNumber"
-                   id={s"form-event-timeline-page-no"}
+                   id={"form-event-timeline-page-no"}
                    value={page.toString}
                    class="col-1 form-control" />
 
             <label>. Show </label>
             <input type="text"
-                   id={s"form-event-timeline-page-size"}
+                   id={"form-event-timeline-page-size"}
                    name="task.eventTimelinePageSize"
                    value={pageSize.toString}
                    class="col-1 form-control" />
@@ -445,7 +445,7 @@ private[ui] class StagePage(parent: StagesTab, store: AppStatusStore) extends We
       {TIMELINE_LEGEND}
     </div> ++
     <script type="text/javascript">
-      {Unparsed(s"drawTaskAssignmentTimeline(" +
+      {Unparsed("drawTaskAssignmentTimeline(" +
       s"$groupArrayStr, $executorsArrayStr, $minLaunchTime, $maxFinishTime, " +
         s"${UIUtils.getTimeZoneOffset()})")}
     </script>
diff --git a/mllib/src/main/scala/org/apache/spark/ml/param/shared/SharedParamsCodeGen.scala b/mllib/src/main/scala/org/apache/spark/ml/param/shared/SharedParamsCodeGen.scala
index 2f6b9c1e11a..c61aa14edca 100644
--- a/mllib/src/main/scala/org/apache/spark/ml/param/shared/SharedParamsCodeGen.scala
+++ b/mllib/src/main/scala/org/apache/spark/ml/param/shared/SharedParamsCodeGen.scala
@@ -144,8 +144,8 @@ private[shared] object SharedParamsCodeGen {
         case _ if c == classOf[Float] => "FloatParam"
         case _ if c == classOf[Double] => "DoubleParam"
         case _ if c == classOf[Boolean] => "BooleanParam"
-        case _ if c.isArray && c.getComponentType == classOf[String] => s"StringArrayParam"
-        case _ if c.isArray && c.getComponentType == classOf[Double] => s"DoubleArrayParam"
+        case _ if c.isArray && c.getComponentType == classOf[String] => "StringArrayParam"
+        case _ if c.isArray && c.getComponentType == classOf[Double] => "DoubleArrayParam"
         case _ => s"Param[${getTypeString(c)}]"
       }
     }
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/Cast.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/Cast.scala
index 78cac4143d3..549bc70bac7 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/Cast.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/Cast.scala
@@ -2267,7 +2267,7 @@ case class Cast(
         (c, evPrim, evNull) =>
           val handleNull = if (ansiEnabled) {
             val errorContext = getContextOrNullCode(ctx)
-            s"throw QueryExecutionErrors.invalidInputInCastToNumberError(" +
+            "throw QueryExecutionErrors.invalidInputInCastToNumberError(" +
               s"org.apache.spark.sql.types.FloatType$$.MODULE$$,$c, 
$errorContext);"
           } else {
             s"$evNull = true;"
@@ -2305,7 +2305,7 @@ case class Cast(
         (c, evPrim, evNull) =>
           val handleNull = if (ansiEnabled) {
             val errorContext = getContextOrNullCode(ctx)
-            s"throw QueryExecutionErrors.invalidInputInCastToNumberError(" +
+            "throw QueryExecutionErrors.invalidInputInCastToNumberError(" +
               s"org.apache.spark.sql.types.DoubleType$$.MODULE$$, $c, 
$errorContext);"
           } else {
             s"$evNull = true;"
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/collectionOperations.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/collectionOperations.scala
index 0d18533e246..cad833d66f6 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/collectionOperations.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/collectionOperations.scala
@@ -730,7 +730,7 @@ case class MapConcat(children: Seq[Expression]) extends ComplexTypeMergingExpres
     val prepareMaps = ctx.splitExpressionsWithCurrentInputs(
       expressions = assignments,
       funcName = "getMapConcatInputs",
-      extraArguments = (s"MapData[]", argsName) :: ("boolean", hasNullName) :: Nil,
+      extraArguments = ("MapData[]", argsName) :: ("boolean", hasNullName) :: Nil,
       returnType = "boolean",
       makeSplitFunction = body =>
         s"""
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/datetimeExpressions.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/datetimeExpressions.scala
index 07933c42af6..22a381278f5 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/datetimeExpressions.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/datetimeExpressions.scala
@@ -1293,7 +1293,7 @@ abstract class ToTimestamp
   override def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode = {
     val javaType = CodeGenerator.javaType(dataType)
     val parseErrorBranch: String = if (failOnError) {
-      s"throw QueryExecutionErrors.ansiDateTimeParseError(e);"
+      "throw QueryExecutionErrors.ansiDateTimeParseError(e);"
     } else {
       s"${ev.isNull} = true;"
     }
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/literals.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/literals.scala
index f3ee251a0fb..38bef3bc36e 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/literals.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/literals.scala
@@ -346,7 +346,7 @@ case class Literal (value: Any, dataType: DataType) extends LeafExpression {
 
   override def toString: String = value match {
     case null => "null"
-    case binary: Array[Byte] => s"0x" + ApacheHex.encodeHexString(binary, false)
+    case binary: Array[Byte] => "0x" + ApacheHex.encodeHexString(binary, false)
     case d: ArrayBasedMapData => s"map(${d.toString})"
     case other =>
       dataType match {
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/optimizer/PullOutGroupingExpressions.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/optimizer/PullOutGroupingExpressions.scala
index 1bd186d89a0..d950f582bb8 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/optimizer/PullOutGroupingExpressions.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/optimizer/PullOutGroupingExpressions.scala
@@ -53,7 +53,7 @@ object PullOutGroupingExpressions extends Rule[LogicalPlan] {
         val newGroupingExpressions = a.groupingExpressions.toIndexedSeq.map {
           case e if !e.foldable && e.children.nonEmpty =>
             complexGroupingExpressionMap
-              .getOrElseUpdate(e.canonicalized, Alias(e, s"_groupingexpression")())
+              .getOrElseUpdate(e.canonicalized, Alias(e, "_groupingexpression")())
               .toAttribute
           case o => o
         }
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/parser/AstBuilder.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/parser/AstBuilder.scala
index a89f5a0f3ae..fed058a0e9a 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/parser/AstBuilder.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/parser/AstBuilder.scala
@@ -3940,7 +3940,7 @@ class AstBuilder extends SqlBaseParserBaseVisitor[AnyRef] with SQLConfHelper wit
     }
 
     AlterColumn(
-      createUnresolvedTable(ctx.table, s"ALTER TABLE ... CHANGE COLUMN"),
+      createUnresolvedTable(ctx.table, "ALTER TABLE ... CHANGE COLUMN"),
       UnresolvedFieldName(columnNameParts),
       dataType = Option(ctx.colType().dataType()).map(typedVisit[DataType]),
       nullable = None,
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/HiveResult.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/HiveResult.scala
index 9a430f7e85e..602cca3327f 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/HiveResult.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/HiveResult.scala
@@ -85,7 +85,7 @@ object HiveResult {
     rows.map {
       case Row(name: String, dataType: String, comment) =>
         Seq(name, dataType, Option(comment.asInstanceOf[String]).getOrElse(""))
-          .map(s => String.format(s"%-20s", s))
+          .map(s => String.format("%-20s", s))
           .mkString("\t")
     }
   }
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/aggregate/HashMapGenerator.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/aggregate/HashMapGenerator.scala
index 713e7db4cf8..c33820ed85e 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/aggregate/HashMapGenerator.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/aggregate/HashMapGenerator.scala
@@ -129,7 +129,7 @@ abstract class HashMapGenerator(
   protected def generateRowIterator(): String
 
   protected final def generateClose(): String = {
-    s"""
+    """
        |public void close() {
        |  batch.close();
        |}
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/aggregate/RowBasedHashMapGenerator.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/aggregate/RowBasedHashMapGenerator.scala
index 44d19ad60d4..286aa1acd3c 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/aggregate/RowBasedHashMapGenerator.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/aggregate/RowBasedHashMapGenerator.scala
@@ -185,7 +185,7 @@ class RowBasedHashMapGenerator(
   }
 
   protected def generateRowIterator(): String = {
-    s"""
+    """
       |public org.apache.spark.unsafe.KVIterator<UnsafeRow, UnsafeRow> rowIterator() {
        |  return batch.rowIterator();
        |}
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/basicPhysicalOperators.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/basicPhysicalOperators.scala
index 0d356ec6f6b..76a8d3d942f 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/basicPhysicalOperators.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/basicPhysicalOperators.scala
@@ -153,7 +153,7 @@ trait GeneratePredicateHelper extends PredicateHelper {
       val nullCheck = if (bound.nullable) {
         s"${ev.isNull} || "
       } else {
-        s""
+        ""
       }
 
       s"""
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/joins/BroadcastHashJoinExec.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/joins/BroadcastHashJoinExec.scala
index 459bda6bf4b..f8d0df1b6e4 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/joins/BroadcastHashJoinExec.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/joins/BroadcastHashJoinExec.scala
@@ -237,7 +237,7 @@ case class BroadcastHashJoinExec(
            |${consume(ctx, input)}
          """.stripMargin
       } else if (broadcastRelation.value == HashedRelationWithAllNullKeys) {
-        s"""
+        """
           |// If the right side contains any all-null key, NAAJ simply returns Nothing.
          """.stripMargin
       } else {
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/ResolveWriteToStream.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/ResolveWriteToStream.scala
index fa2a7885eb9..d48fedd86ff 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/ResolveWriteToStream.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/ResolveWriteToStream.scala
@@ -76,7 +76,7 @@ object ResolveWriteToStream extends Rule[LogicalPlan] with SQLConfHelper {
     }.getOrElse {
       if (s.useTempCheckpointLocation) {
         deleteCheckpointOnStop = true
-        val tempDir = Utils.createTempDir(namePrefix = s"temporary").getCanonicalPath
+        val tempDir = Utils.createTempDir(namePrefix = "temporary").getCanonicalPath
         logWarning("Temporary checkpoint location created which is deleted 
normally when" +
           s" the query didn't fail: $tempDir. If it's required to delete it 
under any" +
           s" circumstances, please set 
${SQLConf.FORCE_DELETE_TEMP_CHECKPOINT_LOCATION.key} to" +
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/jdbc/H2Dialect.scala b/sql/core/src/main/scala/org/apache/spark/sql/jdbc/H2Dialect.scala
index 7665bb91c6e..8783d0da6b9 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/jdbc/H2Dialect.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/jdbc/H2Dialect.scala
@@ -107,7 +107,7 @@ private[sql] object H2Dialect extends JdbcDialect {
       indexName: String,
       tableIdent: Identifier,
       options: JDBCOptions): Boolean = {
-    val sql = s"SELECT * FROM INFORMATION_SCHEMA.INDEXES WHERE " +
+    val sql = "SELECT * FROM INFORMATION_SCHEMA.INDEXES WHERE " +
       s"TABLE_SCHEMA = '${tableIdent.namespace().last}' AND " +
       s"TABLE_NAME = '${tableIdent.name()}' AND INDEX_NAME = '$indexName'"
     JdbcUtils.checkIfIndexExists(conn, sql, options)
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/jdbc/PostgresDialect.scala b/sql/core/src/main/scala/org/apache/spark/sql/jdbc/PostgresDialect.scala
index 878d7a7cfe6..620423d9502 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/jdbc/PostgresDialect.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/jdbc/PostgresDialect.scala
@@ -176,7 +176,7 @@ private object PostgresDialect extends JdbcDialect with SQLConfHelper {
   override def getTableSample(sample: TableSampleInfo): String = {
    // hard-coded to BERNOULLI for now because Spark doesn't have a way to specify sample
     // method name
-    s"TABLESAMPLE BERNOULLI" +
+    "TABLESAMPLE BERNOULLI" +
       s" (${(sample.upperBound - sample.lowerBound) * 100}) REPEATABLE 
(${sample.seed})"
   }
 

