Github user mengxr commented on a diff in the pull request:

    https://github.com/apache/spark/pull/5270#discussion_r28716770
  
    --- Diff: mllib/src/main/scala/org/apache/spark/mllib/regression/IsotonicRegression.scala ---
    @@ -124,6 +131,74 @@ class IsotonicRegressionModel (
           predictions(foundIndex)
         }
       }
    +
    +  override def save(sc: SparkContext, path: String): Unit = {
    +    IsotonicRegressionModel.SaveLoadV1_0.save(sc, path, boundaries, predictions, isotonic)
    +  }
    +
    +  override protected def formatVersion: String = "1.0"
    +}
    +
    +object IsotonicRegressionModel extends Loader[IsotonicRegressionModel] {
    +
    +  import org.apache.spark.mllib.util.Loader._
    +
    +  private object SaveLoadV1_0 {
    +
    +    def thisFormatVersion: String = "1.0"
    +
    +    /** Hard-code class name string in case it changes in the future */
    +    def thisClassName: String = "org.apache.spark.mllib.regression.IsotonicRegressionModel"
    +
    +    /** Model data for model import/export */
    +    case class Data(boundary: Double, prediction: Double)
    +
    +    def save(
    +        sc: SparkContext, 
    +        path: String, 
    +        boundaries: Array[Double], 
    +        predictions: Array[Double], 
    +        isotonic: Boolean): Unit = {
    +      val sqlContext = new SQLContext(sc)
    +      import sqlContext.implicits._
    +
    +      val metadata = compact(render(
    +        ("class" -> thisClassName) ~ ("version" -> thisFormatVersion) ~ 
    +          ("isotonic" -> isotonic)))
    +      sc.parallelize(Seq(metadata), 1).saveAsTextFile(metadataPath(path))
    +
    +      sqlContext.createDataFrame(boundaries.toSeq.zip(predictions)
    +        .map { case (b, p) => Data(b, p) }).saveAsParquetFile(dataPath(path))
    +    }
    +
    +    def load(sc: SparkContext, path: String): (Array[Double], Array[Double]) = {
    +      val sqlContext = new SQLContext(sc)
    +      val dataRDD = sqlContext.parquetFile(dataPath(path))
    +
    +      checkSchema[Data](dataRDD.schema)
    +      val dataArray = dataRDD.select("boundary", "prediction").collect()
    +      val (boundaries, predictions) = dataArray.map {
    +        x => (x.getDouble(0), x.getDouble(1)) }.toList.sortBy(_._1).unzip
    --- End diff ---
    
    Same here: put `x =>` on the same line as the opening brace instead of wrapping it.
    
    ~~~scala
    val (boundaries, predictions) = dataArray.map { x =>
      (x.getDouble(0), x.getDouble(1))
    }.toList.sortBy(_._1).unzip
    ~~~
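
    For context, here is a minimal round trip of the `save`/`load` API this diff adds. It's only a sketch, not tested against this branch: it assumes `sc` is a live `SparkContext` and `data` is an `RDD[(Double, Double, Double)]` of (label, feature, weight) tuples, the input `IsotonicRegression.run` expects.
    
    ~~~scala
    import org.apache.spark.mllib.regression.{IsotonicRegression, IsotonicRegressionModel}
    
    // Train a model, persist it, and load it back (path is illustrative).
    val model = new IsotonicRegression().setIsotonic(true).run(data)
    model.save(sc, "target/tmp/isotonicRegressionModel")
    val sameModel = IsotonicRegressionModel.load(sc, "target/tmp/isotonicRegressionModel")
    
    // The loaded model should produce identical predictions.
    assert(model.predict(0.5) == sameModel.predict(0.5))
    ~~~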