Github user viirya commented on the issue:

    https://github.com/apache/spark/pull/19229
  
    Ran a benchmark similar to the one in https://github.com/apache/spark/pull/18902#issuecomment-321727416:
    
    
    
    Average `transform` time over 10 runs, in seconds, per imputation strategy:

    numColumns | Old (mean) | Old (median) | New (mean) | New (median)
    -- | -- | -- | -- | --
    1 | 0.1291 | 0.0872 | 0.1264 | 0.0583
    10 | 0.4222 | 0.2957 | 0.1383 | 0.0752
    100 | 6.9313 | 7.2270 | 0.3019 | 0.2555
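
    The gap widens with the number of columns: at 100 columns the new code is roughly 23x faster with the mean strategy and 28x with the median. That pattern is consistent with a per-column `withColumn` loop, where each call re-analyzes a growing plan, versus building all output columns in a single pass. A minimal illustrative sketch of the two plan-construction styles (not the exact code of this PR; `col(c) * 2` is just a stand-in for the imputation expression, and `df` is the 100-column DataFrame from the test code below):

        import org.apache.spark.sql.functions.col

        val inCols = Array.range(0, 100).map(i => s"c$i")

        // Per-column style: every withColumn triggers another analysis of a growing plan.
        val perColumn = inCols.foldLeft(df) { (d, c) =>
          d.withColumn(s"o$c", col(c) * 2)
        }

        // Single-pass style: one select builds all output columns with one analysis.
        val onePass = df.select(
          (df.columns.map(col) ++ inCols.map(c => (col(c) * 2).as(s"o$c"))): _*)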
    
    The test code is basically the same, but it now measures transform time:
    
        import org.apache.spark.ml.feature._
        import org.apache.spark.sql.Row
        import org.apache.spark.sql.types._
        import spark.implicits._
        import scala.util.Random

        val seed = 123L
        val random = new Random(seed)
        val n = 10000  // rows
        val m = 100    // columns

        // Build an n x m DataFrame of random doubles with columns c0 .. c{m-1}.
        val rows = sc.parallelize(1 to n).map(i => Row(Array.fill(m)(random.nextDouble): _*))
        val struct = new StructType(Array.range(0, m, 1).map(i => StructField(s"c$i", DoubleType, true)))
        val df = spark.createDataFrame(rows, struct)
        df.persist()
        df.count()  // materialize the cache so the timings below exclude input generation

        for (strategy <- Seq("mean", "median"); k <- Seq(1, 10, 100)) {
          val imputer = new Imputer()
            .setStrategy(strategy)
            .setInputCols(Array.range(0, k, 1).map(i => s"c$i"))
            .setOutputCols(Array.range(0, k, 1).map(i => s"o$i"))
          var duration = 0.0
          for (_ <- 0 until 10) {
            val model = imputer.fit(df)
            val start = System.nanoTime()
            model.transform(df)  // time the transform call itself; the returned plan is not executed here
            val end = System.nanoTime()
            duration += (end - start) / 1e9  // seconds
          }
          println((strategy, k, duration / 10))  // average over 10 runs
        }
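
    One caveat: `transform` returns a lazy DataFrame, so the timings above largely capture plan construction rather than execution over the cached rows, which is exactly the per-column overhead this PR targets. If end-to-end numbers are also of interest, a variant of the loop could force execution with `count()` (my sketch, not what produced the table above):

        for (strategy <- Seq("mean", "median"); k <- Seq(1, 10, 100)) {
          val imputer = new Imputer()
            .setStrategy(strategy)
            .setInputCols(Array.range(0, k, 1).map(i => s"c$i"))
            .setOutputCols(Array.range(0, k, 1).map(i => s"o$i"))
          val model = imputer.fit(df)
          var duration = 0.0
          for (_ <- 0 until 10) {
            val start = System.nanoTime()
            model.transform(df).count()  // count() forces the transformed plan to actually run
            val end = System.nanoTime()
            duration += (end - start) / 1e9
          }
          println((strategy, k, duration / 10))
        }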

