zhengruifeng commented on pull request #28471:
URL: https://github.com/apache/spark/pull/28471#issuecomment-625599005


   Test code:
   ```
   import org.apache.spark.ml.regression._
   import org.apache.spark.storage.StorageLevel

   // Load the epsilon dataset (2000 features) and cache it before timing.
   val df = spark.read.option("numFeatures", "2000").format("libsvm").load("/data1/Datasets/epsilon/epsilon_normalized.t")
   df.persist(StorageLevel.MEMORY_AND_DISK)
   df.count

   // Default loss: squaredError
   val lir = new LinearRegression().setMaxIter(50).setSolver("l-bfgs")
   lir.fit(df)  // warm-up fit before timing

   // Time one fit per block size: (blockSize, fitted model, elapsed millis)
   val results = Seq(1, 4, 16, 64, 256, 1024, 4096).map { size =>
     val start = System.currentTimeMillis
     val model = lir.setBlockSize(size).fit(df)
     val end = System.currentTimeMillis
     (size, model, end - start)
   }

   // Huber loss
   val lir = new LinearRegression().setMaxIter(50).setSolver("l-bfgs").setLoss("huber")

   val results = Seq(1, 4, 16, 64, 256, 1024, 4096).map { size =>
     val start = System.currentTimeMillis
     val model = lir.setBlockSize(size).fit(df)
     val end = System.currentTimeMillis
     (size, model, end - start)
   }
   ```
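
   For reference, a quick way to turn the `(size, model, millis)` tuples in `results` into the speedup figures quoted below, relative to the blockSize=1 baseline (a sketch, not part of the original run):
   ```
   // Sketch: summarize the timings collected above; the first entry of
   // `results` is the blockSize=1 baseline.
   val baseline = results.head._3.toDouble
   results.foreach { case (size, _, millis) =>
     println(f"blockSize=$size%-5d time=${millis}ms speedup=${baseline / millis}%.2fx")
   }
   ```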
   
   
   Result:
   **Huber**
   ```
   scala> results.foreach(t => println(t._2.coefficients.toString.take(100)))
   [0.6424729506929868,-1.2292544634895406,1.7757284244387601,0.2974549255492886,0.935593736145218,-0.7
   [0.6425694454021473,-1.22927248948364,1.7757268011427252,0.2974675136619897,0.9357357899375097,-0.77
   [0.6424853073405433,-1.2292568932224641,1.775728104037945,0.29745659508182537,0.9356121907344618,-0.
   [0.6429959531002037,-1.229354196708174,1.7757175326389232,0.29752415047198205,0.9363681130113419,-0.
   [0.6424842544336862,-1.2292566837366543,1.775728142782258,0.29745645263402737,0.9356106116206417,-0.
   [0.6430623639968113,-1.2293667308116527,1.7757163246117786,0.29753287650971944,0.9364661504709426,-0
   [0.6421067597074761,-1.229184819802838,1.7757358482626238,0.2974067609553463,0.9350521980706877,-0.7

   scala> results.map(_._2.summary.totalIterations)
   res22: Seq[Int] = List(51, 51, 51, 51, 51, 51, 51)

   scala> results.map(_._3)
   res23: Seq[Long] = List(135189, 12046, 11783, 14307, 14399, 14026, 14329)
   ```
   Up to 11X speedup (135,189 ms at blockSize=1 vs 11,783 ms at blockSize=16).
   
   
   **SquaredError**
   ```
   scala> results.foreach(t => println(t._2.coefficients.toString.take(100)))
   [-0.2652587613623121,-0.0707048016667831,0.420750805149307,0.09194452205365045,0.05059855709172461,0
   [-0.26525865677193483,-0.07070480422610806,0.42075057109293873,0.09194450660409814,0.050598582385999
   [-0.26526635878841553,-0.07070459501522909,0.42076764197826116,0.09194564264821924,0.050596757952969
   [-0.26525612879310023,-0.07070486362689087,0.42074489424648864,0.09194413298339,0.050599198284442995
   [-0.2652611612036013,-0.07070474076806575,0.42075615827182555,0.09194487637000792,0.0505979807182646
   [-0.26526190169346425,-0.07070472114668785,0.42075780340712315,0.0919449856303737,0.0505978044045689
   [-0.265262317030222,-0.07070470997227678,0.42075872481615506,0.09194504690071631,0.05059770582212817

   scala> results.map(_._2.summary.totalIterations)
   res26: Seq[Int] = List(51, 51, 51, 51, 51, 51, 51)

   scala> results.map(_._3)
   res27: Seq[Long] = List(71269, 11828, 12254, 15331, 14963, 14420, 14022)
   ```
   Up to 6X speedup (71,269 ms at blockSize=1 vs 11,828 ms at blockSize=4).
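
   As a sanity check on the small coefficient differences visible above, one could compare each model against the blockSize=1 baseline (a sketch, not part of the original test):
   ```
   // Sketch: max absolute coefficient difference vs the blockSize=1 baseline.
   val base = results.head._2.coefficients.toArray
   results.foreach { case (size, model, _) =>
     val maxDiff = model.coefficients.toArray.zip(base)
       .map { case (a, b) => math.abs(a - b) }.max
     println(s"blockSize=$size  max |coeff diff| = $maxDiff")
   }
   ```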

