This is an automated email from the ASF dual-hosted git repository.

maxgekk pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/spark.git


The following commit(s) were added to refs/heads/master by this push:
     new d072833d50a [SPARK-45698][CORE][SQL][SS] Clean up the deprecated API usage related to `Buffer`
d072833d50a is described below

commit d072833d50abd1a6630dd629e3560e3d0ad929cf
Author: yangjie01 <yangji...@baidu.com>
AuthorDate: Fri Oct 27 12:59:22 2023 +0300

    [SPARK-45698][CORE][SQL][SS] Clean up the deprecated API usage related to `Buffer`
    
    ### What changes were proposed in this pull request?
    This PR cleans up the use of the following `s.c.mutable.Buffer` APIs, which have been deprecated since Scala 2.13.0 or Scala 2.13.4:
    
    - `trimStart` -> `dropInPlace`
    - `trimEnd` -> `dropRightInPlace`
    - `append(elems: A*)` -> `appendAll(elems)`
    - `prepend(elems: A*)` -> `prependAll(elems)`
    
    ```
      deprecated("use dropInPlace instead", since = "2.13.4")
      def trimStart(n: Int): Unit = dropInPlace(n)
    
      deprecated("use dropRightInPlace instead", since = "2.13.4")
      def trimEnd(n: Int): Unit = dropRightInPlace(n)
    
      deprecated("Use appendAll instead", "2.13.0")
      `inline` final def append(elems: A*): this.type = addAll(elems)
    
      deprecated("Use prependAll instead", "2.13.0")
      `inline` final def prepend(elems: A*): this.type = prependAll(elems)
    ```
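
    As a rough illustration (a minimal sketch, not taken from this patch; the buffer `buf` below is hypothetical), the replacements behave like this on an `ArrayBuffer`:

    ```
    import scala.collection.mutable.ArrayBuffer

    val buf = ArrayBuffer(1, 2, 3, 4, 5)

    buf.dropInPlace(1)        // was buf.trimStart(1): drop the first element in place  -> ArrayBuffer(2, 3, 4, 5)
    buf.dropRightInPlace(1)   // was buf.trimEnd(1): drop the last element in place     -> ArrayBuffer(2, 3, 4)
    buf.appendAll(Seq(5, 6))  // was buf.append(5, 6): append all given elements        -> ArrayBuffer(2, 3, 4, 5, 6)
    buf.prependAll(Seq(0, 1)) // was buf.prepend(0, 1): prepend all given elements      -> ArrayBuffer(0, 1, 2, 3, 4, 5, 6)
    ```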
    
    ### Why are the changes needed?
    Clean up deprecated API usage.
    
    ### Does this PR introduce _any_ user-facing change?
    No
    
    ### How was this patch tested?
    Pass GitHub Actions.
    
    ### Was this patch authored or co-authored using generative AI tooling?
    No
    
    Closes #43551 from LuciferYang/buffer-deprecated.
    
    Authored-by: yangjie01 <yangji...@baidu.com>
    Signed-off-by: Max Gekk <max.g...@gmail.com>
---
 core/src/main/scala/org/apache/spark/deploy/master/Master.scala   | 4 ++--
 core/src/main/scala/org/apache/spark/util/SizeEstimator.scala     | 2 +-
 .../org/apache/spark/util/collection/ExternalAppendOnlyMap.scala  | 2 +-
 core/src/test/scala/org/apache/spark/deploy/IvyTestUtils.scala    | 2 +-
 .../src/main/scala/org/apache/spark/sql/execution/SparkPlan.scala | 4 ++--
 .../org/apache/spark/sql/execution/arrow/ArrowConverters.scala    | 2 +-
 .../streaming/continuous/ContinuousTextSocketSource.scala         | 2 +-
 .../scala/org/apache/spark/sql/execution/streaming/memory.scala   | 2 +-
 .../execution/streaming/sources/TextSocketMicroBatchStream.scala  | 2 +-
 sql/core/src/test/scala/org/apache/spark/sql/JoinSuite.scala      | 8 ++++----
 10 files changed, 15 insertions(+), 15 deletions(-)

diff --git a/core/src/main/scala/org/apache/spark/deploy/master/Master.scala b/core/src/main/scala/org/apache/spark/deploy/master/Master.scala
index 46503e017ea..29022c7419b 100644
--- a/core/src/main/scala/org/apache/spark/deploy/master/Master.scala
+++ b/core/src/main/scala/org/apache/spark/deploy/master/Master.scala
@@ -1042,7 +1042,7 @@ private[deploy] class Master(
         completedApps.take(toRemove).foreach { a =>
           applicationMetricsSystem.removeSource(a.appSource)
         }
-        completedApps.trimStart(toRemove)
+        completedApps.dropInPlace(toRemove)
       }
       completedApps += app // Remember it in our history
       waitingApps -= app
@@ -1204,7 +1204,7 @@ private[deploy] class Master(
         drivers -= driver
         if (completedDrivers.size >= retainedDrivers) {
           val toRemove = math.max(retainedDrivers / 10, 1)
-          completedDrivers.trimStart(toRemove)
+          completedDrivers.dropInPlace(toRemove)
         }
         completedDrivers += driver
         persistenceEngine.removeDriver(driver)
diff --git a/core/src/main/scala/org/apache/spark/util/SizeEstimator.scala b/core/src/main/scala/org/apache/spark/util/SizeEstimator.scala
index 704aeaefa55..1447a3e752d 100644
--- a/core/src/main/scala/org/apache/spark/util/SizeEstimator.scala
+++ b/core/src/main/scala/org/apache/spark/util/SizeEstimator.scala
@@ -180,7 +180,7 @@ object SizeEstimator extends Logging {
 
     def dequeue(): AnyRef = {
       val elem = stack.last
-      stack.trimEnd(1)
+      stack.dropRightInPlace(1)
       elem
     }
   }
diff --git a/core/src/main/scala/org/apache/spark/util/collection/ExternalAppendOnlyMap.scala b/core/src/main/scala/org/apache/spark/util/collection/ExternalAppendOnlyMap.scala
index 3afbe322b6e..8224472b754 100644
--- a/core/src/main/scala/org/apache/spark/util/collection/ExternalAppendOnlyMap.scala
+++ b/core/src/main/scala/org/apache/spark/util/collection/ExternalAppendOnlyMap.scala
@@ -362,7 +362,7 @@ class ExternalAppendOnlyMap[K, V, C](
     private def removeFromBuffer[T](buffer: ArrayBuffer[T], index: Int): T = {
       val elem = buffer(index)
      buffer(index) = buffer(buffer.size - 1)  // This also works if index == buffer.size - 1
-      buffer.trimEnd(1)
+      buffer.dropRightInPlace(1)
       elem
     }
 
diff --git a/core/src/test/scala/org/apache/spark/deploy/IvyTestUtils.scala b/core/src/test/scala/org/apache/spark/deploy/IvyTestUtils.scala
index 0dcdba3dfb8..0b970a03ad8 100644
--- a/core/src/test/scala/org/apache/spark/deploy/IvyTestUtils.scala
+++ b/core/src/test/scala/org/apache/spark/deploy/IvyTestUtils.scala
@@ -316,7 +316,7 @@ private[deploy] object IvyTestUtils {
       }
       if (withR) {
         val rFiles = createRFiles(root, className, artifact.groupId)
-        allFiles.append(rFiles: _*)
+        allFiles.appendAll(rFiles)
       }
      val jarFile = packJar(jarPath, artifact, allFiles.toSeq, useIvyLayout, withR)
       assert(jarFile.exists(), "Problem creating Jar file")
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/SparkPlan.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/SparkPlan.scala
index bbd74a1fe74..d93a83dec44 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/SparkPlan.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/SparkPlan.scala
@@ -536,13 +536,13 @@ abstract class SparkPlan extends QueryPlan[SparkPlan] with Logging with Serializ
         while (buf.length < n && i < res.length) {
           val rows = decodeUnsafeRows(res(i)._2)
           if (n - buf.length >= res(i)._1) {
-            buf.prepend(rows.toArray[InternalRow]: _*)
+            buf.prependAll(rows.toArray[InternalRow])
           } else {
             val dropUntil = res(i)._1 - (n - buf.length)
             // Same as Iterator.drop but this only takes a long.
             var j: Long = 0L
             while (j < dropUntil) { rows.next(); j += 1L}
-            buf.prepend(rows.toArray[InternalRow]: _*)
+            buf.prependAll(rows.toArray[InternalRow])
           }
           i += 1
         }
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/arrow/ArrowConverters.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/arrow/ArrowConverters.scala
index a0dd939dda2..d6bf1e29edd 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/arrow/ArrowConverters.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/arrow/ArrowConverters.scala
@@ -315,7 +315,7 @@ private[sql] object ArrowConverters extends Logging {
       val reader =
        new ArrowStreamReader(new ByteArrayInputStream(arrowBatchIter.next()), allocator)
      val root = if (reader.loadNextBatch()) reader.getVectorSchemaRoot else null
-      resources.append(reader, root)
+      resources.appendAll(Seq(reader, root))
       if (root == null) {
         (Iterator.empty, null)
       } else {
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/continuous/ContinuousTextSocketSource.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/continuous/ContinuousTextSocketSource.scala
index d1b346b4174..3f5f81bee4e 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/continuous/ContinuousTextSocketSource.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/continuous/ContinuousTextSocketSource.scala
@@ -144,7 +144,7 @@ class TextSocketContinuousStream(
           " for partition " + partition + ". Max valid offset: " + max)
         }
         val n = offset - startOffset.offsets(partition)
-        buckets(partition).trimStart(n)
+        buckets(partition).dropInPlace(n)
     }
     startOffset = endOffset
     recordEndpoint.setStartOffsets(startOffset.offsets)
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/memory.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/memory.scala
index 8fe5319ef4c..826543fd565 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/memory.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/memory.scala
@@ -279,7 +279,7 @@ case class MemoryStream[A : Encoder](
         s"Offsets committed out of order: $lastOffsetCommitted followed by 
$end")
     }
 
-    batches.trimStart(offsetDiff)
+    batches.dropInPlace(offsetDiff)
     lastOffsetCommitted = newOffset
   }
 
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/sources/TextSocketMicroBatchStream.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/sources/TextSocketMicroBatchStream.scala
index 580f7066e44..a01f40bead8 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/sources/TextSocketMicroBatchStream.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/sources/TextSocketMicroBatchStream.scala
@@ -159,7 +159,7 @@ class TextSocketMicroBatchStream(host: String, port: Int, numPartitions: Int)
         s"Offsets committed out of order: $lastOffsetCommitted followed by 
$end")
     }
 
-    batches.trimStart(offsetDiff)
+    batches.dropInPlace(offsetDiff)
     lastOffsetCommitted = newOffset
   }
 
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/JoinSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/JoinSuite.scala
index 45b80d81e73..8c293bab641 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/JoinSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/JoinSuite.scala
@@ -756,10 +756,10 @@ class JoinSuite extends QueryTest with SharedSparkSession with AdaptiveSparkPlan
       }
 
       val expected = new ListBuffer[Row]()
-      expected.append(
+      expected.appendAll(Seq(
         Row(1, "1", 1, 1), Row(1, "1", 1, 2),
         Row(2, "2", 2, 1), Row(2, "2", 2, 2),
-        Row(3, "3", 3, 1), Row(3, "3", 3, 2)
+        Row(3, "3", 3, 1), Row(3, "3", 3, 2))
       )
       for (i <- 4 to 100) {
         expected.append(Row(i, i.toString, null, null))
@@ -830,10 +830,10 @@ class JoinSuite extends QueryTest with SharedSparkSession with AdaptiveSparkPlan
       }
 
       val expected = new ListBuffer[Row]()
-      expected.append(
+      expected.appendAll(Seq(
         Row(1, "1", 1, 1), Row(1, "1", 1, 2),
         Row(2, "2", 2, 1), Row(2, "2", 2, 2),
-        Row(3, "3", 3, 1), Row(3, "3", 3, 2)
+        Row(3, "3", 3, 1), Row(3, "3", 3, 2))
       )
       for (i <- 4 to 100) {
         expected.append(Row(i, i.toString, null, null))


---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscr...@spark.apache.org
For additional commands, e-mail: commits-h...@spark.apache.org
