LuciferYang commented on code in PR #43526:
URL: https://github.com/apache/spark/pull/43526#discussion_r1398837518


##########
examples/src/main/scala/org/apache/spark/examples/sql/SparkSQLExample.scala:
##########
@@ -220,7 +220,8 @@ object SparkSQLExample {
     // +------------+
 
     // No pre-defined encoders for Dataset[Map[K,V]], define explicitly
-    implicit val mapEncoder = org.apache.spark.sql.Encoders.kryo[Map[String, Any]]
+    implicit val mapEncoder: Encoder[Map[String, Any]] =

Review Comment:
   For the modifications to the `Example` files, we can verify them manually to confirm that they still compile and run correctly. This should also be reflected in the PR description, specifically in the "How was this patch tested?" section.
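   
   For reference, a minimal sketch of the fully annotated form (the right-hand side is cut off in this hunk, so the continuation below is an assumption based on the removed line):
   ```scala
   import org.apache.spark.sql.{Encoder, Encoders}
   
   // Annotate the implicit explicitly instead of relying on type inference
   // (required once implicit definitions must declare their types).
   implicit val mapEncoder: Encoder[Map[String, Any]] =
     Encoders.kryo[Map[String, Any]]
   ```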



##########
core/src/main/scala/org/apache/spark/deploy/FaultToleranceTest.scala:
##########
@@ -340,7 +341,7 @@ private object FaultToleranceTest extends App with Logging {
 private class TestMasterInfo(val ip: String, val dockerId: DockerId, val logFile: File)
   extends Logging  {
 
-  implicit val formats = org.json4s.DefaultFormats
+  implicit val formats: Formats = org.json4s.DefaultFormats

Review Comment:
   Although this looks correct, there is no test mechanism that guarantees the correctness and usability of this file.
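   
   For context, a minimal sketch of how such an annotated implicit is consumed (hypothetical JSON input, standard json4s API):
   ```scala
   import org.json4s.{DefaultFormats, Formats, JValue}
   import org.json4s.jackson.JsonMethods.parse
   
   // Annotating with the trait `Formats` keeps the declared type stable,
   // instead of the inferred singleton type `DefaultFormats.type`.
   implicit val formats: Formats = DefaultFormats
   
   val json: JValue = parse("""{"ip": "10.0.0.1"}""") // hypothetical input
   val ip: String = (json \ "ip").extract[String]     // resolves the implicit Formats
   ```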
   
   



##########
core/src/test/scala/org/apache/spark/deploy/worker/WorkerSuite.scala:
##########
@@ -23,7 +23,7 @@ import java.util.function.Supplier
 
 import scala.concurrent.duration._
 
-import org.json4s.{DefaultFormats, Extraction}
+import org.json4s._

Review Comment:
   ditto



##########
mllib/src/main/scala/org/apache/spark/mllib/clustering/BisectingKMeansModel.scala:
##########
@@ -188,7 +188,7 @@ object BisectingKMeansModel extends Loader[BisectingKMeansModel] {
     }
 
     def load(sc: SparkContext, path: String): BisectingKMeansModel = {
-      implicit val formats: DefaultFormats = DefaultFormats

Review Comment:
   We can unify the newly added cases on `DefaultFormats` or `Formats`, but I don't think we need to change the existing cases.
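   
   For example, either of these would satisfy the explicit-type requirement (a sketch, not a prescription):
   ```scala
   import org.json4s.{DefaultFormats, Formats}
   
   // Option 1: annotate with the wider trait type.
   implicit val formats: Formats = DefaultFormats
   
   // Option 2: annotate with the `DefaultFormats` trait itself, as some
   // existing mllib code already does.
   // implicit val formats: DefaultFormats = DefaultFormats
   ```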
   
   



##########
core/src/test/scala/org/apache/spark/util/KeyLockSuite.scala:
##########
@@ -29,7 +29,7 @@ import org.apache.spark.SparkFunSuite
 class KeyLockSuite extends SparkFunSuite with TimeLimits {
 
   // Necessary to make ScalaTest 3.x interrupt a thread on the JVM like ScalaTest 2.2.x
-  private implicit val defaultSignaler = ThreadSignaler
+  private implicit val defaultSignaler: ThreadSignaler.type = ThreadSignaler

Review Comment:
   The type annotation here should be `Signaler`, not `ThreadSignaler.type`.
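   
   i.e., a minimal sketch of the suggested annotation:
   ```scala
   import org.scalatest.concurrent.{Signaler, ThreadSignaler}
   
   // Prefer the `Signaler` trait over the singleton type `ThreadSignaler.type`.
   implicit val defaultSignaler: Signaler = ThreadSignaler
   ```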



##########
mllib/src/main/scala/org/apache/spark/ml/linalg/JsonMatrixConverter.scala:
##########
@@ -16,7 +16,7 @@
  */
 package org.apache.spark.ml.linalg
 
-import org.json4s.DefaultFormats
+import org.json4s._

Review Comment:
   ```scala
   import org.json4s.{DefaultFormats, Formats}
   ```
   



##########
core/src/test/scala/org/apache/spark/resource/ResourceUtilsSuite.scala:
##########
@@ -21,7 +21,7 @@ import java.io.File
 import java.nio.file.{Files => JavaFiles}
 import java.util.Optional
 
-import org.json4s.{DefaultFormats, Extraction}
+import org.json4s._

Review Comment:
   ditto



##########
core/src/test/scala/org/apache/spark/SparkContextSuite.scala:
##########
@@ -32,7 +32,7 @@ import org.apache.hadoop.io.{BytesWritable, LongWritable, Text}
 import org.apache.hadoop.mapred.TextInputFormat
 import org.apache.hadoop.mapreduce.lib.input.{TextInputFormat => NewTextInputFormat}
 import org.apache.logging.log4j.{Level, LogManager}
-import org.json4s.{DefaultFormats, Extraction}
+import org.json4s._

Review Comment:
   I'd prefer `import org.json4s.{DefaultFormats, Extraction, Formats}`



##########
mllib/src/main/scala/org/apache/spark/mllib/feature/Word2Vec.scala:
##########
@@ -23,7 +23,7 @@ import scala.collection.mutable
 import scala.jdk.CollectionConverters._
 
 import com.google.common.collect.{Ordering => GuavaOrdering}
-import org.json4s.DefaultFormats
+import org.json4s._

Review Comment:
   `import org.json4s.{DefaultFormats, Formats}`



##########
mllib/src/main/scala/org/apache/spark/ml/r/ALSWrapper.scala:
##########
@@ -18,7 +18,7 @@
 package org.apache.spark.ml.r
 
 import org.apache.hadoop.fs.Path
-import org.json4s._
+import org.json4s.{DefaultFormats, Formats}

Review Comment:
   please revert this line



##########
sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/CompactibleFileStreamLog.scala:
##########
@@ -140,7 +137,9 @@ abstract class CompactibleFileStreamLog[T <: AnyRef : ClassTag](
     out.write(Serialization.write(entry).getBytes(UTF_8))
   }
 
-  private def deserializeEntry(line: String): T = Serialization.read[T](line)
+  private def deserializeEntry(line: String): T = {

Review Comment:
   Why does this line need to change?
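   
   For context: `Serialization.read[T]` resolves an implicit `Manifest[T]` in addition to `Formats`, which is presumably what this change is threading through. A standalone sketch of the requirement (re-declared outside the class for illustration):
   ```scala
   import org.json4s.{Formats, NoTypeHints}
   import org.json4s.jackson.Serialization
   
   implicit val formats: Formats = Serialization.formats(NoTypeHints)
   
   // read[T] needs both an implicit Formats and a Manifest[T] in scope.
   def deserializeEntry[T: Manifest](line: String): T =
     Serialization.read[T](line)
   ```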



##########
resource-managers/yarn/src/main/scala/org/apache/spark/scheduler/cluster/YarnSchedulerBackend.scala:
##########
@@ -64,14 +64,14 @@ private[spark] abstract class YarnSchedulerBackend(
   private val yarnSchedulerEndpointRef = rpcEnv.setupEndpoint(
     YarnSchedulerBackend.ENDPOINT_NAME, yarnSchedulerEndpoint)
 
-  private implicit val askTimeout = RpcUtils.askRpcTimeout(sc.conf)
+  private implicit val askTimeout: Any = RpcUtils.askRpcTimeout(sc.conf)

Review Comment:
   Why is this type `Any`?
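   
   `RpcUtils.askRpcTimeout` returns an `RpcTimeout`, so presumably the annotation should be (a sketch, using the surrounding class's `sc`):
   ```scala
   // `Any` would also break implicit resolution at `ask` call sites,
   // which look up an implicit RpcTimeout.
   private implicit val askTimeout: RpcTimeout = RpcUtils.askRpcTimeout(sc.conf)
   ```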



##########
sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/HDFSMetadataLog.scala:
##########
@@ -47,16 +46,16 @@ import org.apache.spark.util.ArrayImplicits._
 * Note: [[HDFSMetadataLog]] doesn't support S3-like file systems as they don't guarantee listing
  * files in a directory always shows the latest files.
  */
-class HDFSMetadataLog[T <: AnyRef : ClassTag](sparkSession: SparkSession, path: String)
-  extends MetadataLog[T] with Logging {
-
-  private implicit val formats = Serialization.formats(NoTypeHints)
+class HDFSMetadataLog[T <: AnyRef: ClassTag](sparkSession: SparkSession, path: String)(
+    private final implicit val manifest: Manifest[T])
+    extends MetadataLog[T]
+    with Logging {
 
-  /** Needed to serialize type T into JSON when using Jackson */
-  private implicit val manifest = Manifest.classType[T](implicitly[ClassTag[T]].runtimeClass)
+  private implicit val formats: Formats = Serialization.formats(NoTypeHints)
 
   // Avoid serializing generic sequences, see SPARK-17372
-  require(implicitly[ClassTag[T]].runtimeClass != classOf[Seq[_]],
+  require(

Review Comment:
   Is this just a line-wrapping change? If so, please revert it.
   
   



##########
sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/HDFSMetadataLog.scala:
##########
@@ -35,7 +35,6 @@ import org.apache.spark.sql.errors.QueryExecutionErrors
 import org.apache.spark.sql.internal.SQLConf
 import org.apache.spark.util.ArrayImplicits._
 
-

Review Comment:
   please revert this change



##########
mllib/src/main/scala/org/apache/spark/mllib/fpm/FPGrowth.scala:
##########
@@ -25,7 +25,7 @@ import scala.jdk.CollectionConverters._
 import scala.reflect.ClassTag
 import scala.reflect.runtime.universe._
 
-import org.json4s.DefaultFormats
+import org.json4s._

Review Comment:
   `import org.json4s.{DefaultFormats, Formats}`



##########
sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/HDFSMetadataLog.scala:
##########
@@ -47,16 +46,16 @@ import org.apache.spark.util.ArrayImplicits._
 * Note: [[HDFSMetadataLog]] doesn't support S3-like file systems as they don't guarantee listing
  * files in a directory always shows the latest files.
  */
-class HDFSMetadataLog[T <: AnyRef : ClassTag](sparkSession: SparkSession, path: String)
-  extends MetadataLog[T] with Logging {
-
-  private implicit val formats = Serialization.formats(NoTypeHints)
+class HDFSMetadataLog[T <: AnyRef: ClassTag](sparkSession: SparkSession, path: String)(
+    private final implicit val manifest: Manifest[T])

Review Comment:
   Why do we need to add this implicit definition?
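   
   For reference, the removed body-level definition (where `T` is the class's type parameter) appears to only need a type annotation to keep compiling, rather than a new implicit constructor parameter (a sketch):
   ```scala
   import scala.reflect.ClassTag
   
   // Derive the Manifest from the ClassTag inside the class body, as before.
   private implicit val manifest: Manifest[T] =
     Manifest.classType[T](implicitly[ClassTag[T]].runtimeClass)
   ```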



##########
sql/hive-thriftserver/src/test/scala/org/apache/spark/sql/hive/thriftserver/HiveThriftServer2Suites.scala:
##########
@@ -441,7 +441,7 @@ class HiveThriftBinaryServerSuite extends HiveThriftServer2Test {
         s"LOAD DATA LOCAL INPATH '${TestData.smallKv}' OVERWRITE INTO TABLE 
test_map")
 
       queries.foreach(statement.execute)
-      implicit val ec = ExecutionContext.fromExecutorService(
+      implicit val ec: ExecutionContextExecutorService = ExecutionContext.fromExecutorService(

Review Comment:
   Just a question: is `ec.shutdownNow()` needed everywhere this pattern is used?
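   
   e.g., a sketch of the lifecycle I would expect (hypothetical body):
   ```scala
   import java.util.concurrent.Executors
   import scala.concurrent.{ExecutionContext, ExecutionContextExecutorService}
   
   implicit val ec: ExecutionContextExecutorService =
     ExecutionContext.fromExecutorService(Executors.newSingleThreadExecutor())
   try {
     // ... run the asynchronous statements ...
   } finally {
     ec.shutdownNow() // release the underlying thread pool
   }
   ```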
   
   



##########
mllib/src/main/scala/org/apache/spark/ml/linalg/JsonVectorConverter.scala:
##########
@@ -17,7 +17,7 @@
 
 package org.apache.spark.ml.linalg
 
-import org.json4s.DefaultFormats
+import org.json4s._

Review Comment:
   ```scala
   import org.json4s.{DefaultFormats, Formats}
   ```



##########
mllib/src/main/scala/org/apache/spark/ml/r/AFTSurvivalRegressionWrapper.scala:
##########
@@ -18,7 +18,7 @@
 package org.apache.spark.ml.r
 
 import org.apache.hadoop.fs.Path
-import org.json4s._
+import org.json4s.{DefaultFormats, Formats}

Review Comment:
   please revert this line



##########
mllib/src/main/scala/org/apache/spark/mllib/regression/RegressionModel.scala:
##########
@@ -17,7 +17,7 @@
 
 package org.apache.spark.mllib.regression
 
-import org.json4s.{DefaultFormats, JValue}
+import org.json4s._

Review Comment:
   `import org.json4s.{DefaultFormats, Formats, JValue}`
   



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
