GitHub user MLnick commented on a diff in the pull request:

    https://github.com/apache/spark/pull/15593#discussion_r87547519
  
    --- Diff: mllib/src/main/scala/org/apache/spark/ml/classification/LogisticRegression.scala ---
    @@ -1486,57 +1504,65 @@ private class LogisticAggregator(
         var marginOfLabel = 0.0
         var maxMargin = Double.NegativeInfinity
     
    -    val margins = Array.tabulate(numClasses) { i =>
    -      var margin = 0.0
    -      features.foreachActive { (index, value) =>
    -        if (localFeaturesStd(index) != 0.0 && value != 0.0) {
    -          margin += localCoefficients(i * numFeaturesPlusIntercept + index) *
    -            value / localFeaturesStd(index)
    -        }
    +    val margins = new Array[Double](numClasses)
    +    features.foreachActive { (index, value) =>
    +      val stdValue = value / localFeaturesStd(index)
    +      var j = 0
    +      while (j < numClasses) {
    +        margins(j) += localCoefficients(index * numClasses + j) * stdValue
    +        j += 1
           }
    -
    +    }
    +    var i = 0
    +    while (i < numClasses) {
           if (fitIntercept) {
    -        margin += localCoefficients(i * numFeaturesPlusIntercept + numFeatures)
    +        margins(i) += localCoefficients(numClasses * numFeatures + i)
           }
    -      if (i == label.toInt) marginOfLabel = margin
    -      if (margin > maxMargin) {
    -        maxMargin = margin
    +      if (i == label.toInt) marginOfLabel = margins(i)
    +      if (margins(i) > maxMargin) {
    +        maxMargin = margins(i)
           }
    -      margin
    +      i += 1
         }
     
         /**
          * When maxMargin > 0, the original formula could cause overflow.
          * We address this by subtracting maxMargin from all the margins, so it's guaranteed
          * that all of the new margins will be smaller than zero to prevent arithmetic overflow.
          */
    +    val multipliers = new Array[Double](numClasses)
         val sum = {
           var temp = 0.0
    -      if (maxMargin > 0) {
    -        for (i <- 0 until numClasses) {
    -          margins(i) -= maxMargin
    -          temp += math.exp(margins(i))
    -        }
    -      } else {
    -        for (i <- 0 until numClasses) {
    -          temp += math.exp(margins(i))
    -        }
    +      var i = 0
    +      while (i < numClasses) {
    +        if (maxMargin > 0) margins(i) -= maxMargin
    +        val exp = math.exp(margins(i))
    +        temp += exp
    +        multipliers(i) = exp
    +        i += 1
           }
           temp
         }
     
    -    for (i <- 0 until numClasses) {
    -      val multiplier = math.exp(margins(i)) / sum - {
    -        if (label == i) 1.0 else 0.0
    -      }
    -      features.foreachActive { (index, value) =>
    -        if (localFeaturesStd(index) != 0.0 && value != 0.0) {
    -          localGradientArray(i * numFeaturesPlusIntercept + index) +=
    -            weight * multiplier * value / localFeaturesStd(index)
    +    margins.indices.foreach { i =>
    +      multipliers(i) = multipliers(i) / sum - (if (label == i) 1.0 else 0.0)
    +    }
    +    features.foreachActive { (index, value) =>
    +      if (localFeaturesStd(index) != 0.0 && value != 0.0) {
    +        val stdValue = value / localFeaturesStd(index)
    +        var j = 0
    +        while (j < numClasses) {
    +          localGradientArray(index * numClasses + j) +=
    +            weight * multipliers(j) * stdValue
    +          j += 1
             }
           }
    -      if (fitIntercept) {
    -        localGradientArray(i * numFeaturesPlusIntercept + numFeatures) += weight * multiplier
    +    }
    +    if (fitIntercept) {
    +      var i = 0
    +      while (i < numClasses) {
    +        localGradientArray(numFeatures * numClasses + i) += weight * multipliers(i)
    +        i += 1
           }
         }
     
    --- End diff --
    
    I'm not sure I fully understand where you intend to use `foreachActive` over the
    gradient matrix? Maybe it's the placement of this comment that is confusing me ...

    ... but here in `multinomialUpdateInPlace` we iterate over the features with
    `foreachActive`, and then for each active feature we loop over `numClasses`. If we
    iterate over the gradient using `foreachActive` instead, how will that work? Won't it
    be super inefficient? Perhaps I am missing something about what you intend; could you
    clarify with an example?
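    For reference, the pattern I'm describing in the new code is feature-major: the outer
    `foreachActive` only touches the active features, and the inner `while` loop covers
    every class for that feature. A minimal standalone sketch of that access pattern
    (made-up sizes and values, assuming `spark-mllib` on the classpath, not the actual
    `LogisticAggregator`):

    ```scala
    import org.apache.spark.ml.linalg.Vectors

    val numClasses = 3
    val numFeatures = 4
    // Feature-major layout, as in the new code: the coefficient for
    // (feature index, class j) lives at coefficients(index * numClasses + j).
    val coefficients = Array.fill(numFeatures * numClasses)(0.1)
    val featuresStd = Array(1.0, 2.0, 0.5, 1.5)
    val features = Vectors.sparse(numFeatures, Seq((0, 1.0), (2, 3.0)))

    val margins = new Array[Double](numClasses)
    // Outer loop: active features only; inner loop: every class for that feature.
    features.foreachActive { (index, value) =>
      if (featuresStd(index) != 0.0 && value != 0.0) {
        val stdValue = value / featuresStd(index)
        var j = 0
        while (j < numClasses) {
          margins(j) += coefficients(index * numClasses + j) * stdValue
          j += 1
        }
      }
    }
    ```

    If we instead walked the gradient matrix itself with something like `foreachActive`,
    we'd be visiting all `numFeatures * numClasses` entries regardless of how sparse
    `features` is, which is the inefficiency I'm worried about.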
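    On a separate note, for anyone reading along: the `maxMargin` subtraction described in
    the comment in the hunk above is the usual log-sum-exp stabilization. A tiny standalone
    sketch (made-up margins, not the aggregator code) of why the shift is needed:

    ```scala
    // Made-up margins for illustration only.
    val margins = Array(2.0, 1000.0)
    val maxMargin = margins.max

    // Naive denominator: math.exp(1000.0) overflows to Infinity.
    val naiveSum = margins.map(math.exp).sum                        // Double.PositiveInfinity

    // After subtracting maxMargin every exponent is <= 0, so nothing overflows.
    val shiftedSum = margins.map(m => math.exp(m - maxMargin)).sum  // ~1.0

    // The shift cancels when normalizing, so the probabilities are unchanged.
    val probs = margins.map(m => math.exp(m - maxMargin) / shiftedSum)
    ```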
    
    


