GitHub user imatiach-msft commented on a diff in the pull request:

    https://github.com/apache/spark/pull/16630#discussion_r101356243
  
    --- Diff: mllib/src/test/scala/org/apache/spark/ml/regression/GeneralizedLinearRegressionSuite.scala ---
    @@ -1104,6 +1103,83 @@ class GeneralizedLinearRegressionSuite
           .fit(datasetGaussianIdentity.as[LabeledPoint])
       }
     
    +
    +  test("glm summary: feature name") {
    +    // dataset1 with no attribute
    +    val dataset1 = Seq(
    +      Instance(2.0, 1.0, Vectors.dense(0.0, 5.0)),
    +      Instance(8.0, 2.0, Vectors.dense(1.0, 7.0)),
    +      Instance(3.0, 3.0, Vectors.dense(2.0, 11.0)),
    +      Instance(9.0, 4.0, Vectors.dense(3.0, 13.0)),
    +      Instance(2.0, 5.0, Vectors.dense(2.0, 3.0))
    +    ).toDF()
    +
    +    // dataset2 with attribute
    +    val datasetTmp = Seq(
    +      (2.0, 1.0, 0.0, 5.0),
    +      (8.0, 2.0, 1.0, 7.0),
    +      (3.0, 3.0, 2.0, 11.0),
    +      (9.0, 4.0, 3.0, 13.0),
    +      (2.0, 5.0, 2.0, 3.0)
    +    ).toDF("y", "w", "x1", "x2")
    +    val formula = new RFormula().setFormula("y ~ x1 + x2")
    +    val dataset2 = formula.fit(datasetTmp).transform(datasetTmp)
    +
    +    val expectedFeature = Seq(Array("V1", "V2"), Array("x1", "x2"))
    +
    +    var idx = 0
    +    for (dataset <- Seq(dataset1, dataset2)) {
    +      val model = new GeneralizedLinearRegression().fit(dataset)
    +      model.summary.featureName.zip(expectedFeature(idx))
    +        .foreach { x => assert(x._1 === x._2) }
    +      idx += 1
    +    }
    +  }
    +
    +  test("glm summary: summaryTable") {
    +    val dataset = Seq(
    +      Instance(2.0, 1.0, Vectors.dense(0.0, 5.0)),
    +      Instance(8.0, 2.0, Vectors.dense(1.0, 7.0)),
    +      Instance(3.0, 3.0, Vectors.dense(2.0, 11.0)),
    +      Instance(9.0, 4.0, Vectors.dense(3.0, 13.0)),
    +      Instance(2.0, 5.0, Vectors.dense(2.0, 3.0))
    +    ).toDF()
    +
    +    val expectedFeature = Seq(Array("V1", "V2"),
    +      Array("Intercept", "V1", "V2"))
    +    val expectedEstimate = Seq(Vectors.dense(0.2884, 0.538),
    +      Vectors.dense(0.7903, 0.2258, 0.4677))
    +    val expectedStdError = Seq(Vectors.dense(1.724, 0.3787),
    +      Vectors.dense(4.0129, 2.1153, 0.5815))
    +    val expectedTValue = Seq(Vectors.dense(0.1673, 1.4205),
    +      Vectors.dense(0.1969, 0.1067, 0.8043))
    +    val expectedPValue = Seq(Vectors.dense(0.8778, 0.2506),
    +      Vectors.dense(0.8621, 0.9247, 0.5056))
    +
    +    var idx = 0
    +    for (fitIntercept <- Seq(false, true)) {
    +      val trainer = new GeneralizedLinearRegression()
    +        .setFamily("gaussian")
    --- End diff ---
    
    I would usually prefer to use variables wherever possible: they are much easier to update through various editors, and errors get caught at compile time rather than at runtime. But it is a minor point, and this looks consistent with most of the Spark codebase.
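    
    For example, something along these lines (just an illustrative sketch; the `gaussianFamily` constant is hypothetical and not part of this PR):
    
        import org.apache.spark.ml.regression.GeneralizedLinearRegression
    
        // Sketch only: hoisting the family name into a named constant inside the
        // suite means a misspelled reference fails at compile time, whereas a
        // misspelled string literal ("gausian") would only fail when the test runs.
        private val gaussianFamily: String = "gaussian"
    
        val trainer = new GeneralizedLinearRegression()
          .setFamily(gaussianFamily)
          .setFitIntercept(true)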

