Github user sethah commented on a diff in the pull request:

    https://github.com/apache/spark/pull/14834#discussion_r78105821
  
    --- Diff: 
mllib/src/test/scala/org/apache/spark/ml/classification/LogisticRegressionSuite.scala
 ---
    @@ -827,67 +1743,117 @@ class LogisticRegressionSuite
     
       }
     
    -  test("binary logistic regression with weighted samples") {
    -    val (dataset, weightedDataset) = {
    -      val nPoints = 1000
    -      val coefficients = Array(-0.57997, 0.912083, -0.371077, -0.819866, 
2.688191)
    -      val xMean = Array(5.843, 3.057, 3.758, 1.199)
    -      val xVariance = Array(0.6856, 0.1899, 3.116, 0.581)
    -      val testData =
    -        generateMultinomialLogisticInput(coefficients, xMean, xVariance, 
true, nPoints, 42)
    -
    -      // Let's over-sample the positive samples twice.
    -      val data1 = testData.flatMap { case labeledPoint: LabeledPoint =>
    -        if (labeledPoint.label == 1.0) {
    -          Iterator(labeledPoint, labeledPoint)
    -        } else {
    -          Iterator(labeledPoint)
    -        }
    -      }
    +  test("binary logistic regression with weighted data") {
    +    val numClasses = 2
    +    val numPoints = 40
    +    val outlierData = 
MLTestingUtils.genClassificationInstancesWithWeightedOutliers(spark,
    +      numClasses, numPoints)
    +    val testData = 
spark.createDataFrame(Array.tabulate[LabeledPoint](numClasses) { i =>
    +      LabeledPoint(i.toDouble, Vectors.dense(i.toDouble))
    +    })
    +    val lr = new LogisticRegression().setWeightCol("weight")
    +    val model = lr.fit(outlierData)
    +    val results = model.transform(testData).select("label", 
"prediction").collect()
    +
    +    // check that the predictions are the one to one mapping
    +    results.foreach { case Row(label: Double, pred: Double) =>
    +      assert(label === pred)
    +    }
    +    val (overSampledData, weightedData) =
    +      
MLTestingUtils.genEquivalentOversampledAndWeightedInstances(outlierData, 
"label", "features",
    +        42L)
    +    val weightedModel = lr.fit(weightedData)
    +    val overSampledModel = lr.setWeightCol("").fit(overSampledData)
    +    assert(weightedModel.coefficientMatrix ~== 
overSampledModel.coefficientMatrix relTol 0.01)
    +  }
     
    -      val rnd = new Random(8392)
    -      val data2 = testData.flatMap { case LabeledPoint(label: Double, 
features: Vector) =>
    -        if (rnd.nextGaussian() > 0.0) {
    -          if (label == 1.0) {
    -            Iterator(
    -              Instance(label, 1.2, features),
    -              Instance(label, 0.8, features),
    -              Instance(0.0, 0.0, features))
    -          } else {
    -            Iterator(
    -              Instance(label, 0.3, features),
    -              Instance(1.0, 0.0, features),
    -              Instance(label, 0.1, features),
    -              Instance(label, 0.6, features))
    -          }
    -        } else {
    -          if (label == 1.0) {
    -            Iterator(Instance(label, 2.0, features))
    -          } else {
    -            Iterator(Instance(label, 1.0, features))
    -          }
    -        }
    -      }
    +  test("multinomial logistic regression with weighted data") {
    +    val numClasses = 5
    +    val numPoints = 40
    +    val outlierData = 
MLTestingUtils.genClassificationInstancesWithWeightedOutliers(spark,
    +      numClasses, numPoints)
    +    val testData = 
spark.createDataFrame(Array.tabulate[LabeledPoint](numClasses) { i =>
    +      LabeledPoint(i.toDouble, Vectors.dense(i.toDouble))
    +    })
    +    val mlr = new LogisticRegression().setWeightCol("weight")
    +    val model = mlr.fit(outlierData)
    +    val results = model.transform(testData).select("label", 
"prediction").collect()
    +
    +    // check that the predictions are the one to one mapping
    +    results.foreach { case Row(label: Double, pred: Double) =>
    +      assert(label === pred)
    +    }
    +    val (overSampledData, weightedData) =
    +      
MLTestingUtils.genEquivalentOversampledAndWeightedInstances(outlierData, 
"label", "features",
    +        42L)
    +    val weightedModel = mlr.fit(weightedData)
    +    val overSampledModel = mlr.setWeightCol("").fit(overSampledData)
    +    assert(weightedModel.coefficientMatrix ~== 
overSampledModel.coefficientMatrix relTol 0.01)
    +  }
     
    -      (spark.createDataFrame(sc.parallelize(data1, 4)),
    -        spark.createDataFrame(sc.parallelize(data2, 4)))
    +  test("set family") {
    +    val lr = new LogisticRegression().setMaxIter(1)
    +    // don't set anything for binary classification
    +    val model1 = lr.fit(binaryDataset)
    +    assert(model1.coefficientMatrix.numRows === 1 && 
model1.coefficientMatrix.numCols === 4)
    +    assert(model1.interceptVector.size === 1)
    +
    +    // set to multinomial for binary classification
    +    val model2 = lr.setFamily("multinomial").fit(binaryDataset)
    +    assert(model2.coefficientMatrix.numRows === 2 && 
model2.coefficientMatrix.numCols === 4)
    +    assert(model2.interceptVector.size === 2)
    +
    +    // set to binary for binary classification
    +    val model3 = lr.setFamily("binomial").fit(binaryDataset)
    +    assert(model3.coefficientMatrix.numRows === 1 && 
model3.coefficientMatrix.numCols === 4)
    +    assert(model3.interceptVector.size === 1)
    +
    +    // don't set anything for multiclass classification
    +    val mlr = new LogisticRegression().setMaxIter(1)
    +    val model4 = mlr.fit(multinomialDataset)
    +    assert(model4.coefficientMatrix.numRows === 3 && 
model4.coefficientMatrix.numCols === 4)
    +    assert(model4.interceptVector.size === 3)
    +
    +    // set to binary for multiclass classification
    +    mlr.setFamily("binomial")
    +    val thrown = intercept[IllegalArgumentException] {
    +      mlr.fit(multinomialDataset)
         }
    +    assert(thrown.getMessage.contains("Binomial family only supports 1 or 
2 outcome classes"))
     
    -    val trainer1a = (new LogisticRegression).setFitIntercept(true)
    -      .setRegParam(0.0).setStandardization(true)
    -    val trainer1b = (new 
LogisticRegression).setFitIntercept(true).setWeightCol("weight")
    -      .setRegParam(0.0).setStandardization(true)
    -    val model1a0 = trainer1a.fit(dataset)
    -    val model1a1 = trainer1a.fit(weightedDataset)
    -    val model1b = trainer1b.fit(weightedDataset)
    -    assert(model1a0.coefficients !~= model1a1.coefficients absTol 1E-3)
    -    assert(model1a0.intercept !~= model1a1.intercept absTol 1E-3)
    -    assert(model1a0.coefficients ~== model1b.coefficients absTol 1E-3)
    -    assert(model1a0.intercept ~== model1b.intercept absTol 1E-3)
    +    // set to multinomial for multiclass
    +    mlr.setFamily("multinomial")
    +    val model5 = mlr.fit(multinomialDataset)
    +    assert(model5.coefficientMatrix.numRows === 3 && 
model5.coefficientMatrix.numCols === 4)
    +    assert(model5.interceptVector.size === 3)
    +  }
    +
    +  test("set initial model") {
    +    val lr = new LogisticRegression()
    +    val model1 = lr.fit(smallBinaryDataset)
    +    val lr2 = new 
LogisticRegression().setInitialModel(model1).setMaxIter(5)
    +    val model2 = lr2.fit(smallBinaryDataset)
    +    val predictions1 = 
model1.transform(smallBinaryDataset).select("prediction").collect()
    +    val predictions2 = 
model2.transform(smallBinaryDataset).select("prediction").collect()
    +    predictions1.zip(predictions2).foreach { case (Row(p1: Double), 
Row(p2: Double)) =>
    +      assert(p1 === p2)
    +    }
    +    assert(model2.summary.totalIterations === 1)
    +
    +    val lr3 = new LogisticRegression()
    +    val model3 = lr3.fit(smallMultinomialDataset)
    +    val lr4 = new 
LogisticRegression().setInitialModel(model3).setMaxIter(5)
    +    val model4 = lr4.fit(smallMultinomialDataset)
    +    val predictions3 = 
model3.transform(smallMultinomialDataset).select("prediction").collect()
    +    val predictions4 = 
model4.transform(smallMultinomialDataset).select("prediction").collect()
    +    predictions3.zip(predictions4).foreach { case (Row(p1: Double), 
Row(p2: Double)) =>
    +      assert(p1 === p2)
    +    }
    +    // TODO: check that it converges in a single iteration when initial 
model is available
    --- End diff ---
    
    when *model summary* is available


---
If your project is set up for it, you can reply to this email and have your
reply appear on GitHub as well. If your project does not have this feature
enabled and wishes so, or if the feature is enabled but not working, please
contact infrastructure at infrastruct...@apache.org or file a JIRA ticket
with INFRA.
---

---------------------------------------------------------------------
To unsubscribe, e-mail: reviews-unsubscr...@spark.apache.org
For additional commands, e-mail: reviews-h...@spark.apache.org

Reply via email to