Github user mengxr commented on a diff in the pull request:

    https://github.com/apache/spark/pull/353#discussion_r11458695
  
    --- Diff: mllib/src/main/scala/org/apache/spark/mllib/optimization/LBFGS.scala ---
    @@ -0,0 +1,263 @@
    +/*
    + * Licensed to the Apache Software Foundation (ASF) under one or more
    + * contributor license agreements.  See the NOTICE file distributed with
    + * this work for additional information regarding copyright ownership.
    + * The ASF licenses this file to You under the Apache License, Version 2.0
    + * (the "License"); you may not use this file except in compliance with
    + * the License.  You may obtain a copy of the License at
    + *
    + *    http://www.apache.org/licenses/LICENSE-2.0
    + *
    + * Unless required by applicable law or agreed to in writing, software
    + * distributed under the License is distributed on an "AS IS" BASIS,
    + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    + * See the License for the specific language governing permissions and
    + * limitations under the License.
    + */
    +
    +package org.apache.spark.mllib.optimization
    +
    +import scala.Array
    +import scala.collection.mutable.ArrayBuffer
    +
    +import breeze.linalg.{DenseVector => BDV}
    +import breeze.optimize.{CachedDiffFunction, DiffFunction}
    +
    +import org.apache.spark.Logging
    +import org.apache.spark.rdd.RDD
    +import org.apache.spark.mllib.linalg.{Vectors, Vector}
    +
    +/**
    + * Class used to solve an optimization problem using Limited-memory BFGS.
    + * @param gradient Gradient function to be used.
    + * @param updater Updater to be used to update weights after every iteration.
    + */
    +class LBFGS(var gradient: Gradient, var updater: Updater)
    +  extends Optimizer with Logging
    +{
    +  private var numCorrections: Int = 10
    +  private var lineSearchTolerance: Double = 0.9
    +  private var convTolerance: Double = 1E-4
    +  private var maxNumIterations: Int = 100
    +  private var regParam: Double = 0.0
    +  private var miniBatchFraction: Double = 1.0
    +
    +  /**
    +   * Set the number of corrections used in the LBFGS update. Default 10.
    +   * Values of m less than 3 are not recommended; large values of m
    +   * will result in excessive computing time. 3 < m < 10 is recommended.
    +   * Restriction: m > 0
    +   */
    +  def setNumCorrections(corrections: Int): this.type = {
    +    assert(corrections > 0)
    +    this.numCorrections = corrections
    +    this
    +  }
    +
    +  /**
    +   * Set the tolerance to control the accuracy of the line search in the mcsrch step. Default 0.9.
    +   * If the function and gradient evaluations are inexpensive with respect to the cost of
    +   * the iteration (which is sometimes the case when solving very large problems) it may
    +   * be advantageous to set it to a small value. A typical small value is 0.1.
    +   * Restriction: should be greater than 1e-4.
    +   */
    +  def setLineSearchTolerance(tolerance: Double): this.type = {
    +    this.lineSearchTolerance = tolerance
    +    this
    +  }
    +
    +  /**
    +   * Set the fraction of data to be used for each L-BFGS iteration. Default 1.0.
    +   */
    +  def setMiniBatchFraction(fraction: Double): this.type = {
    +    this.miniBatchFraction = fraction
    +    this
    +  }
    +
    +  /**
    +   * Set the convergence tolerance of iterations for L-BFGS. Default 1E-4.
    +   * A smaller value will lead to higher accuracy at the cost of more iterations.
    +   */
    +  def setConvTolerance(tolerance: Double): this.type = {
    +    this.convTolerance = tolerance
    +    this
    +  }
    +
    +  /**
    +   * Set the maximal number of iterations for L-BFGS. Default 100.
    +   */
    +  def setMaxNumIterations(iters: Int): this.type = {
    +    this.maxNumIterations = iters
    +    this
    +  }
    +
    +  /**
    +   * Set the regularization parameter. Default 0.0.
    +   */
    +  def setRegParam(regParam: Double): this.type = {
    +    this.regParam = regParam
    +    this
    +  }
    +
    +  /**
    +   * Set the gradient function (of the loss function of a single data example)
    +   * to be used for L-BFGS.
    +   */
    +  def setGradient(gradient: Gradient): this.type = {
    +    this.gradient = gradient
    +    this
    +  }
    +
    +  /**
    +   * Set the updater function to actually perform a gradient step in a given direction.
    +   * The updater is responsible for performing the update from the regularization term as well,
    +   * and therefore determines what kind of regularization is used, if any.
    +   */
    +  def setUpdater(updater: Updater): this.type = {
    +    this.updater = updater
    +    this
    +  }
    +
    +  def optimize(data: RDD[(Double, Vector)], initialWeights: Vector): Vector = {
    --- End diff --
    
    Add `override` before `def` so we know that it will inherit the docs.
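    
    For illustration, this is roughly what the suggested change would look like. `optimize` is declared in the `Optimizer` trait, so marking it `override` makes the relationship explicit and lets it inherit the trait's Scaladoc. The body and the `runMiniBatchLBFGS` helper below are assumptions for the sketch, not code taken from this hunk:
    
    ```scala
    // Sketch only: `override` marks that this implements Optimizer.optimize.
    // The delegation target `runMiniBatchLBFGS` is a hypothetical helper here,
    // since the method body is not visible in the quoted hunk.
    override def optimize(data: RDD[(Double, Vector)], initialWeights: Vector): Vector = {
      val (weights, _) = LBFGS.runMiniBatchLBFGS(
        data, gradient, updater, numCorrections, lineSearchTolerance, convTolerance,
        maxNumIterations, regParam, miniBatchFraction, initialWeights)
      weights
    }
    ```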


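    As context for the fluent setters quoted above, a minimal usage sketch; `LogisticGradient`, `SquaredL2Updater`, `trainingData`, and `initialWeights` are placeholder names for this illustration, not part of the diff:
    
    ```scala
    // Configure the optimizer through the builder-style setters shown in the diff.
    val lbfgs = new LBFGS(new LogisticGradient(), new SquaredL2Updater())
      .setNumCorrections(10)      // history size m; 3 < m < 10 is recommended
      .setConvTolerance(1e-4)     // convergence tolerance of the iterations
      .setMaxNumIterations(100)   // cap on the number of L-BFGS iterations
      .setRegParam(0.1)           // regularization is applied by the updater
      .setMiniBatchFraction(1.0)  // fraction of data used per iteration
    
    // trainingData: RDD[(Double, Vector)], initialWeights: Vector
    val weights = lbfgs.optimize(trainingData, initialWeights)
    ```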