[ https://issues.apache.org/jira/browse/FLINK-1807?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=14532311#comment-14532311 ]
ASF GitHub Bot commented on FLINK-1807:
---------------------------------------

Github user tillrohrmann commented on a diff in the pull request:

    https://github.com/apache/flink/pull/613#discussion_r29836296

--- Diff: flink-staging/flink-ml/src/main/scala/org/apache/flink/ml/optimization/GradientDescent.scala ---
@@ -0,0 +1,254 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+package org.apache.flink.ml.optimization
+
+import org.apache.flink.api.common.functions.RichMapFunction
+import org.apache.flink.api.scala._
+import org.apache.flink.configuration.Configuration
+import org.apache.flink.ml.common._
+import org.apache.flink.ml.math._
+import org.apache.flink.ml.optimization.IterativeSolver.{Iterations, Stepsize}
+import org.apache.flink.ml.optimization.Solver._
+
+/** This [[Solver]] performs Stochastic Gradient Descent optimization using mini batches.
+ *
+ * For each labeled vector in a mini batch the gradient is computed and added to a partial
+ * gradient. The partial gradients are then summed and divided by the size of the batches. The
+ * average gradient is then used to update the weight values, including regularization.
+ *
+ * At the moment, the whole partition is used for SGD, making it effectively a batch gradient
+ * descent. Once a sampling operator has been introduced, the algorithm can be optimized.
+ *
+ * @param runParameters The parameters to tune the algorithm. Currently these include:
+ *                      [[Solver.LossFunction]] for the loss function to be used,
+ *                      [[Solver.RegularizationType]] for the type of regularization,
+ *                      [[Solver.RegularizationParameter]] for the regularization parameter,
+ *                      [[IterativeSolver.Iterations]] for the maximum number of iterations,
+ *                      [[IterativeSolver.Stepsize]] for the learning rate used.
+ */
+class GradientDescent(runParameters: ParameterMap) extends IterativeSolver {
+
+  import Solver.WEIGHTVECTOR_BROADCAST
+
+  var parameterMap: ParameterMap = parameters ++ runParameters
+
+  // TODO(tvas): Use once we have proper sampling in place
+//  case object MiniBatchFraction extends Parameter[Double] {
+//    val defaultValue = Some(1.0)
+//  }
+//
+//  def setMiniBatchFraction(fraction: Double): GradientDescent = {
+//    parameterMap.add(MiniBatchFraction, fraction)
+//    this
+//  }
+
+  /** Performs one iteration of Stochastic Gradient Descent using mini batches
+    *
+    * @param data A Dataset of LabeledVector (label, features) pairs
+    * @param currentWeights A Dataset with the current weights to be optimized as its only element
+    * @return A Dataset containing the weights after one stochastic gradient descent step
+    */
+  private def SGDStep(data: DataSet[(LabeledVector)], currentWeights: DataSet[WeightVector]):
+    DataSet[WeightVector] = {
+
+    // TODO: Sample from input to realize proper SGD
+    data.map {
+      new GradientCalculation
+    }.withBroadcastSet(currentWeights, WEIGHTVECTOR_BROADCAST).reduce {
+      (left, right) =>
+        val (leftGradVector, leftLoss, leftCount) = left
+        val (rightGradVector, rightLoss, rightCount) = right
+        // Add the left gradient to the right one
+        BLAS.axpy(1.0, leftGradVector.weights, rightGradVector.weights)
+        val gradients = WeightVector(
+          rightGradVector.weights, leftGradVector.intercept + rightGradVector.intercept)
+
+        (gradients, leftLoss + rightLoss, leftCount + rightCount)
+    }.map {
+      new WeightsUpdate
+    }.withBroadcastSet(currentWeights, WEIGHTVECTOR_BROADCAST)
+  }
+
+  /** Provides a solution for the given optimization problem
+    *
+    * @param data A Dataset of LabeledVector (label, features) pairs
+    * @param initWeights The initial weights that will be optimized
+    * @return The weights, optimized for the provided data.
+    */
+  override def optimize(data: DataSet[LabeledVector], initWeights: Option[DataSet[WeightVector]]):
+    DataSet[WeightVector] = {
+    // TODO: Faster way to do this?
+    val dimensionsDS = data.map(_.vector.size).reduce((a, b) => b)
+
+    val numberOfIterations: Int = parameterMap(Iterations)
+
+    val initialWeightsDS: DataSet[WeightVector] = initWeights match {
+      case Some(x) => x
+      case None => createInitialWeightVector(dimensionsDS)
+    }
+
+    // Perform the iterations
+    // TODO: Enable convergence stopping criterion, as in Multiple Linear regression
+    initialWeightsDS.iterate(numberOfIterations) {
+      weightVector => {
+        SGDStep(data, weightVector)
+      }
+    }
+  }
+
+  /** Mapping function that calculates the weight gradients from the data.
+    *
+    */
+  private class GradientCalculation extends
+    RichMapFunction[LabeledVector, (WeightVector, Double, Int)] {
--- End diff --

better to put "extends RichMapFunction[...]" in one line and indent it


> Stochastic gradient descent optimizer for ML library
> -----------------------------------------------------
>
>                 Key: FLINK-1807
>                 URL: https://issues.apache.org/jira/browse/FLINK-1807
>             Project: Flink
>          Issue Type: Improvement
>          Components: Machine Learning Library
>            Reporter: Till Rohrmann
>            Assignee: Theodore Vasiloudis
>              Labels: ML
>
> Stochastic gradient descent (SGD) is a widely used optimization technique in
> different ML algorithms. Thus, it would be helpful to provide a generalized
> SGD implementation which can be instantiated with the respective gradient
> computation. Such a building block would make the development of future
> algorithms easier.



--
This message was sent by Atlassian JIRA
(v6.3.4#6332)
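
[Editor's note] One plausible reading of the review remark about "extends RichMapFunction[...]" is the layout sketched below. This is an assumption about the reviewer's intent, not text from the pull request, and the body is elided since the full class lives in the diff above:

  private class GradientCalculation
    extends RichMapFunction[LabeledVector, (WeightVector, Double, Int)] {
    // ... gradient computation as in the diff above
  }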
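[Editor's note] The scaladoc in the diff and the issue description both refer to the same mini-batch scheme: compute a gradient per example, sum the gradients, divide by the batch size, and take one step along the averaged gradient. Below is a minimal, self-contained sketch of that scheme in plain Scala. It deliberately avoids Flink's DataSet API, uses a squared-loss gradient purely for illustration, omits the regularization mentioned in the scaladoc, and none of its names come from the pull request:

object MiniBatchSgdSketch {

  final case class Example(features: Array[Double], label: Double)

  // Gradient of the squared loss 0.5 * (w.x - y)^2 for one example: (w.x - y) * x
  def gradient(weights: Array[Double], ex: Example): Array[Double] = {
    val prediction = weights.zip(ex.features).map { case (w, x) => w * x }.sum
    val error = prediction - ex.label
    ex.features.map(_ * error)
  }

  // One mini-batch step: sum the per-example gradients, divide by the batch size,
  // then update the weights as w := w - stepsize * averagedGradient
  def sgdStep(weights: Array[Double], batch: Seq[Example], stepsize: Double): Array[Double] = {
    val summed = batch
      .map(gradient(weights, _))
      .reduce((g1, g2) => g1.zip(g2).map { case (a, b) => a + b })
    val averaged = summed.map(_ / batch.size)
    weights.zip(averaged).map { case (w, g) => w - stepsize * g }
  }

  def main(args: Array[String]): Unit = {
    // Two toy examples whose exact least-squares solution is (1.0, 2.0)
    val data = Seq(Example(Array(1.0, 2.0), 5.0), Example(Array(2.0, 1.0), 4.0))
    var weights = Array(0.0, 0.0)
    for (_ <- 1 to 500) {
      weights = sgdStep(weights, data, stepsize = 0.05)
    }
    println(weights.mkString(", ")) // prints values close to 1.0 and 2.0
  }
}

In the actual GradientDescent class above, the same averaging happens inside the reduce on (gradient, loss, count) triples, and the division by the count together with the weight update is deferred to the WeightsUpdate map function.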