GitHub user zhengruifeng commented on a diff in the pull request:

    https://github.com/apache/spark/pull/12819#discussion_r81128334
  
    --- Diff: mllib/src/main/scala/org/apache/spark/mllib/classification/NaiveBayes.scala ---
    @@ -355,79 +356,33 @@ class NaiveBayes private (
        */
       @Since("0.9.0")
       def run(data: RDD[LabeledPoint]): NaiveBayesModel = {
    -    val requireNonnegativeValues: Vector => Unit = (v: Vector) => {
    -      val values = v match {
    -        case sv: SparseVector => sv.values
    -        case dv: DenseVector => dv.values
    -      }
    -      if (!values.forall(_ >= 0.0)) {
    -        throw new SparkException(s"Naive Bayes requires nonnegative 
feature values but found $v.")
    -      }
    -    }
    +    val spark = SparkSession
    +      .builder()
    +      .sparkContext(data.context)
    +      .getOrCreate()
     
    -    val requireZeroOneBernoulliValues: Vector => Unit = (v: Vector) => {
    -      val values = v match {
    -        case sv: SparseVector => sv.values
    -        case dv: DenseVector => dv.values
    -      }
    -      if (!values.forall(v => v == 0.0 || v == 1.0)) {
    -        throw new SparkException(
    -          s"Bernoulli naive Bayes requires 0 or 1 feature values but found 
$v.")
    -      }
    -    }
    +    import spark.implicits._
     
    -    // Aggregates term frequencies per label.
    -    // TODO: Calling combineByKey and collect creates two stages, we can implement something
    -    // TODO: similar to reduceByKeyLocally to save one stage.
    -    val aggregated = data.map(p => (p.label, 
p.features)).combineByKey[(Long, DenseVector)](
    -      createCombiner = (v: Vector) => {
    -        if (modelType == Bernoulli) {
    -          requireZeroOneBernoulliValues(v)
    -        } else {
    -          requireNonnegativeValues(v)
    -        }
    -        (1L, v.copy.toDense)
    -      },
    -      mergeValue = (c: (Long, DenseVector), v: Vector) => {
    -        requireNonnegativeValues(v)
    -        BLAS.axpy(1.0, v, c._2)
    -        (c._1 + 1L, c._2)
    -      },
    -      mergeCombiners = (c1: (Long, DenseVector), c2: (Long, DenseVector)) => {
    -        BLAS.axpy(1.0, c2._2, c1._2)
    -        (c1._1 + c2._1, c1._2)
    -      }
    -    ).collect().sortBy(_._1)
    +    val nb = new NewNaiveBayes()
    +      .setModelType(modelType)
    +      .setSmoothing(lambda)
     
    -    val numLabels = aggregated.length
    -    var numDocuments = 0L
    -    aggregated.foreach { case (_, (n, _)) =>
    -      numDocuments += n
    -    }
    -    val numFeatures = aggregated.head match { case (_, (_, v)) => v.size }
    -
    -    val labels = new Array[Double](numLabels)
    -    val pi = new Array[Double](numLabels)
    -    val theta = Array.fill(numLabels)(new Array[Double](numFeatures))
    -
    -    val piLogDenom = math.log(numDocuments + numLabels * lambda)
    -    var i = 0
    -    aggregated.foreach { case (label, (n, sumTermFreqs)) =>
    -      labels(i) = label
    -      pi(i) = math.log(n + lambda) - piLogDenom
    -      val thetaLogDenom = modelType match {
    -        case Multinomial => math.log(sumTermFreqs.values.sum + numFeatures 
* lambda)
    -        case Bernoulli => math.log(n + 2.0 * lambda)
    -        case _ =>
    -          // This should never happen.
    -          throw new UnknownError(s"Invalid modelType: $modelType.")
    -      }
    -      var j = 0
    -      while (j < numFeatures) {
    -        theta(i)(j) = math.log(sumTermFreqs(j) + lambda) - thetaLogDenom
    -        j += 1
    -      }
    -      i += 1
    +    val labels = data.map(_.label).distinct().collect().sorted
    --- End diff --
    
    Labels in mllib's NB implementation are not guaranteed to lie in the range [0, numClasses); the following code, with the labels set to `{-1, +1}`, runs successfully:
    ```
    import org.apache.spark.mllib.linalg._
    import org.apache.spark.mllib.regression.LabeledPoint
    import org.apache.spark.mllib.classification.NaiveBayes
    
    val points = Seq(
      LabeledPoint(-1.0, Vectors.dense(Array(1.0, 2.0))),
      LabeledPoint(+1.0, Vectors.dense(Array(1.0, 2.0))))
    val rdd = sc.parallelize(points)
    val nbm = NaiveBayes.train(rdd)
    ```
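    
    For contrast, here is a minimal sketch of the same data run through the ml estimator this PR delegates to (the `NewNaiveBayes` alias below mirrors the one in the diff). This assumes the ml classification path's usual label validation, which requires labels to be integers in [0, numClasses), so fitting is expected to fail rather than train:
    ```
    import org.apache.spark.ml.classification.{NaiveBayes => NewNaiveBayes}
    import org.apache.spark.ml.linalg.Vectors
    
    val df = spark.createDataFrame(Seq(
      (-1.0, Vectors.dense(1.0, 2.0)),
      (+1.0, Vectors.dense(1.0, 2.0))
    )).toDF("label", "features")
    
    // Assumption: the ml Predictor path checks that labels are nonnegative
    // integers in [0, numClasses), so fitting on the -1.0 label should throw.
    val nbm = new NewNaiveBayes().fit(df)
    ```
    If that check does apply, delegating mllib's `run` to the ml implementation would silently change behavior for existing callers that train on negative labels.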
    