This is an automated email from the ASF dual-hosted git repository.

gurwls223 pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/spark.git


The following commit(s) were added to refs/heads/master by this push:
     new a09e9dc1531 [MINOR] Fix typos
a09e9dc1531 is described below

commit a09e9dc1531bdef905d4609945c7747622928905
Author: smallzhongfeng <zhongjingxi...@didiglobal.com>
AuthorDate: Tue Jan 3 09:57:51 2023 +0900

    [MINOR] Fix typos
    
    ### What changes were proposed in this pull request?
    
    Fix "then" vs. "than" typos in comments and test names, in ReceiverSupervisorImpl and several other files.
    
    ### Does this PR introduce _any_ user-facing change?
    
    No.
    
    ### How was this patch tested?
    
    No need.
    
    Closes #39340 from smallzhongfeng/fix-typos.
    
    Authored-by: smallzhongfeng <zhongjingxi...@didiglobal.com>
    Signed-off-by: Hyukjin Kwon <gurwls...@apache.org>
---
 core/src/main/scala/org/apache/spark/SparkContext.scala               | 4 ++--
 .../main/scala/org/apache/spark/ml/evaluation/ClusteringMetrics.scala | 4 ++--
 mllib/src/main/scala/org/apache/spark/ml/stat/Summarizer.scala        | 2 +-
 .../src/main/scala/org/apache/spark/deploy/yarn/YarnAllocator.scala   | 2 +-
 .../spark/sql/catalyst/expressions/CollectionExpressionsSuite.scala   | 2 +-
 .../spark/sql/catalyst/expressions/HigherOrderFunctionsSuite.scala    | 2 +-
 .../org/apache/spark/streaming/receiver/ReceiverSupervisorImpl.scala  | 2 +-
 7 files changed, 9 insertions(+), 9 deletions(-)

diff --git a/core/src/main/scala/org/apache/spark/SparkContext.scala b/core/src/main/scala/org/apache/spark/SparkContext.scala
index 5cbf2e83371..62e652ff9bb 100644
--- a/core/src/main/scala/org/apache/spark/SparkContext.scala
+++ b/core/src/main/scala/org/apache/spark/SparkContext.scala
@@ -3173,7 +3173,7 @@ object WritableConverter {
 
   implicit val bytesWritableConverterFn: () => WritableConverter[Array[Byte]] = {
     () => simpleWritableConverter[Array[Byte], BytesWritable] { bw =>
-      // getBytes method returns array which is longer then data to be returned
+      // getBytes method returns array which is longer than data to be returned
       Arrays.copyOfRange(bw.getBytes, 0, bw.getLength)
     }
   }
@@ -3204,7 +3204,7 @@ object WritableConverter {
 
   implicit def bytesWritableConverter(): WritableConverter[Array[Byte]] = {
     simpleWritableConverter[Array[Byte], BytesWritable] { bw =>
-      // getBytes method returns array which is longer then data to be returned
+      // getBytes method returns array which is longer than data to be returned
       Arrays.copyOfRange(bw.getBytes, 0, bw.getLength)
     }
   }
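
For context, the comment in the hunks above refers to Hadoop's BytesWritable, whose backing array can be longer than the valid payload. A minimal sketch of that behavior, illustrative only and not part of this commit (the values below are made up):

    import java.util.Arrays
    import org.apache.hadoop.io.BytesWritable

    // The backing buffer of a BytesWritable may be longer than the valid data,
    // so only the first getLength bytes should be copied out.
    val bw = new BytesWritable(Array[Byte](1, 2, 3))
    bw.setCapacity(16)                        // grow the backing array; the payload is still 3 bytes
    val raw: Array[Byte] = bw.getBytes        // length 16, with trailing padding
    val data = Arrays.copyOfRange(raw, 0, bw.getLength)  // Array(1, 2, 3), as in the converters above
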
diff --git a/mllib/src/main/scala/org/apache/spark/ml/evaluation/ClusteringMetrics.scala b/mllib/src/main/scala/org/apache/spark/ml/evaluation/ClusteringMetrics.scala
index 0106c872297..b8563bed601 100644
--- a/mllib/src/main/scala/org/apache/spark/ml/evaluation/ClusteringMetrics.scala
+++ b/mllib/src/main/scala/org/apache/spark/ml/evaluation/ClusteringMetrics.scala
@@ -397,7 +397,7 @@ private[evaluation] object SquaredEuclideanSilhouette extends Silhouette {
     val clustersStatsMap = SquaredEuclideanSilhouette
       .computeClusterStats(dfWithSquaredNorm, predictionCol, featuresCol, weightCol)
 
-    // Silhouette is reasonable only when the number of clusters is greater then 1
+    // Silhouette is reasonable only when the number of clusters is greater than 1
     assert(clustersStatsMap.size > 1, "Number of clusters must be greater than one.")
 
     val bClustersStatsMap = dataset.sparkSession.sparkContext.broadcast(clustersStatsMap)
@@ -604,7 +604,7 @@ private[evaluation] object CosineSilhouette extends Silhouette {
     val clustersStatsMap = computeClusterStats(dfWithNormalizedFeatures, featuresCol,
       predictionCol, weightCol)
 
-    // Silhouette is reasonable only when the number of clusters is greater then 1
+    // Silhouette is reasonable only when the number of clusters is greater than 1
     assert(clustersStatsMap.size > 1, "Number of clusters must be greater than one.")
 
     val bClustersStatsMap = dataset.sparkSession.sparkContext.broadcast(clustersStatsMap)
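
The assertion above is reached through the public ClusteringEvaluator API. A hedged usage sketch (the `predictions` DataFrame and its column names are assumptions for illustration, not part of this commit):

    import org.apache.spark.ml.evaluation.ClusteringEvaluator

    // Silhouette compares each point's own cluster with the nearest other cluster,
    // so it is only meaningful when the model produced at least two clusters.
    val evaluator = new ClusteringEvaluator()
      .setFeaturesCol("features")
      .setPredictionCol("prediction")
      .setMetricName("silhouette")
    val score = evaluator.evaluate(predictions)  // hits the assert above if only one cluster is present
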
diff --git a/mllib/src/main/scala/org/apache/spark/ml/stat/Summarizer.scala b/mllib/src/main/scala/org/apache/spark/ml/stat/Summarizer.scala
index bf9d07338db..8a124ae4f4c 100644
--- a/mllib/src/main/scala/org/apache/spark/ml/stat/Summarizer.scala
+++ b/mllib/src/main/scala/org/apache/spark/ml/stat/Summarizer.scala
@@ -105,7 +105,7 @@ object Summarizer extends Logging {
    * @return a builder.
    * @throws IllegalArgumentException if one of the metric names is not understood.
    *
-   * Note: Currently, the performance of this interface is about 2x~3x slower then using the RDD
+   * Note: Currently, the performance of this interface is about 2x~3x slower than using the RDD
    * interface.
    */
   @Since("2.3.0")
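
A hedged usage sketch of the DataFrame-based interface the note above describes (assumes a SparkSession `spark` and a DataFrame `df` with a Vector column "features" and a Double column "weight"; not part of this commit):

    import org.apache.spark.ml.linalg.Vector
    import org.apache.spark.ml.stat.Summarizer
    import spark.implicits._

    // Weighted mean and variance per feature via the DataFrame interface;
    // per the note above, this is roughly 2x~3x slower than the RDD-based summarizer.
    val (meanVec, varianceVec) = df
      .select(Summarizer.metrics("mean", "variance")
        .summary($"features", $"weight").as("summary"))
      .select("summary.mean", "summary.variance")
      .as[(Vector, Vector)]
      .first()
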
diff --git a/resource-managers/yarn/src/main/scala/org/apache/spark/deploy/yarn/YarnAllocator.scala b/resource-managers/yarn/src/main/scala/org/apache/spark/deploy/yarn/YarnAllocator.scala
index 4980d7e1841..da4dd0cbb6b 100644
--- a/resource-managers/yarn/src/main/scala/org/apache/spark/deploy/yarn/YarnAllocator.scala
+++ b/resource-managers/yarn/src/main/scala/org/apache/spark/deploy/yarn/YarnAllocator.scala
@@ -203,7 +203,7 @@ private[yarn] class YarnAllocator(
 
   // The default profile is always present so we need to initialize the datastructures keyed by
   // ResourceProfile id to ensure its present if things start running before a request for
-  // executors could add it. This approach is easier then going and special casing everywhere.
+  // executors could add it. This approach is easier than going and special casing everywhere.
   private def initDefaultProfile(): Unit = synchronized {
     allocatedHostToContainersMapPerRPId(DEFAULT_RESOURCE_PROFILE_ID) =
       new HashMap[String, mutable.Set[ContainerId]]()
diff --git a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/expressions/CollectionExpressionsSuite.scala b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/expressions/CollectionExpressionsSuite.scala
index 32b3840760f..aadfe7b9f1d 100644
--- a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/expressions/CollectionExpressionsSuite.scala
+++ b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/expressions/CollectionExpressionsSuite.scala
@@ -2534,7 +2534,7 @@ class CollectionExpressionsSuite extends SparkFunSuite with ExpressionEvalHelper
       Literal.create(Seq(Float.NaN, null, 1f), ArrayType(FloatType))), true)
   }
 
-  test("SPARK-36740: ArrayMin/ArrayMax/SortArray should handle NaN greater then non-NaN value") {
+  test("SPARK-36740: ArrayMin/ArrayMax/SortArray should handle NaN greater than non-NaN value") {
     // ArrayMin
     checkEvaluation(ArrayMin(
       Literal.create(Seq(Double.NaN, 1d, 2d), ArrayType(DoubleType))), 1d)
diff --git a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/expressions/HigherOrderFunctionsSuite.scala b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/expressions/HigherOrderFunctionsSuite.scala
index 5f62dc97086..fb77f1a482c 100644
--- a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/expressions/HigherOrderFunctionsSuite.scala
+++ b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/expressions/HigherOrderFunctionsSuite.scala
@@ -836,7 +836,7 @@ class HigherOrderFunctionsSuite extends SparkFunSuite with ExpressionEvalHelper
     assert(!mapFilter2_1.semanticEquals(mapFilter2_3))
   }
 
-  test("SPARK-36740: ArraySort should handle NaN greater then non-NaN value") {
+  test("SPARK-36740: ArraySort should handle NaN greater than non-NaN value") {
     checkEvaluation(arraySort(
       Literal.create(Seq(Double.NaN, 1d, 2d, null), ArrayType(DoubleType))),
       Seq(1d, 2d, Double.NaN, null))
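
Both SPARK-36740 tests above exercise the Spark SQL ordering rule that NaN compares as greater than any non-NaN double. A small illustration against the public DataFrame API (assumes a SparkSession `spark`; the data is made up and not part of this commit):

    import org.apache.spark.sql.functions.{array_max, array_min, array_sort}
    import spark.implicits._

    // NaN sorts after every non-NaN value, so it is the max but never the min.
    val df = Seq(Seq(Double.NaN, 1d, 2d)).toDF("xs")
    df.select(array_sort($"xs"), array_min($"xs"), array_max($"xs")).show(false)
    // array_sort -> [1.0, 2.0, NaN], array_min -> 1.0, array_max -> NaN
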
diff --git a/streaming/src/main/scala/org/apache/spark/streaming/receiver/ReceiverSupervisorImpl.scala b/streaming/src/main/scala/org/apache/spark/streaming/receiver/ReceiverSupervisorImpl.scala
index 13c80841d4d..948b5f6ecf1 100644
--- a/streaming/src/main/scala/org/apache/spark/streaming/receiver/ReceiverSupervisorImpl.scala
+++ b/streaming/src/main/scala/org/apache/spark/streaming/receiver/ReceiverSupervisorImpl.scala
@@ -215,7 +215,7 @@ private[streaming] class ReceiverSupervisorImpl(
   private def nextBlockId = StreamBlockId(streamId, newBlockId.getAndIncrement)
 
   private def cleanupOldBlocks(cleanupThreshTime: Time): Unit = {
-    logDebug(s"Cleaning up blocks older then $cleanupThreshTime")
+    logDebug(s"Cleaning up blocks older than $cleanupThreshTime")
     receivedBlockHandler.cleanupOldBlocks(cleanupThreshTime.milliseconds)
   }
 }


---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscr...@spark.apache.org
For additional commands, e-mail: commits-h...@spark.apache.org
