Github user tillrohrmann commented on a diff in the pull request:

    https://github.com/apache/flink/pull/613#discussion_r29349648

--- Diff: flink-staging/flink-ml/src/main/scala/org/apache/flink/ml/optimization/GradientDescent.scala ---
@@ -0,0 +1,237 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+package org.apache.flink.ml.optimization
+
+import org.apache.flink.api.common.functions.RichMapFunction
+import org.apache.flink.api.scala._
+import org.apache.flink.configuration.Configuration
+import org.apache.flink.ml.common._
+import org.apache.flink.ml.math._
+import org.apache.flink.ml.optimization.IterativeSolver.{Iterations, Stepsize}
+import org.apache.flink.ml.optimization.Solver._
+
+/** This [[Solver]] performs Stochastic Gradient Descent optimization using mini batches.
+ *
+ * For each labeled vector in a mini batch the gradient is computed and added to a partial
+ * gradient. The partial gradients are then summed and divided by the size of the batches. The
+ * average gradient is then used to update the weight values, including regularization.
+ *
+ * At the moment, the whole partition is used for SGD, making it effectively a batch gradient
+ * descent. Once a sampling operator has been introduced, the algorithm can be optimized.
+ *
+ * @param runParameters The parameters to tune the algorithm. Currently these include:
+ *                      [[Solver.LossFunction]] for the loss function to be used,
+ *                      [[Solver.RegularizationType]] for the type of regularization,
+ *                      [[Solver.RegularizationParameter]] for the regularization parameter,
+ *                      [[IterativeSolver.Iterations]] for the maximum number of iterations,
+ *                      [[IterativeSolver.Stepsize]] for the learning rate used.
+ */
+class GradientDescent(runParameters: ParameterMap) extends IterativeSolver {
+
+  import Solver.WEIGHTVECTOR_BROADCAST
+
+  var parameterMap: ParameterMap = parameters ++ runParameters
+
+  // TODO(tvas): Use once we have proper sampling in place
+//  case object MiniBatchFraction extends Parameter[Double] {
+//    val defaultValue = Some(1.0)
+//  }
+//
+//  def setMiniBatchFraction(fraction: Double): GradientDescent = {
+//    parameterMap.add(MiniBatchFraction, fraction)
+//    this
+//  }
+
+  /** Performs one iteration of Stochastic Gradient Descent using mini batches.
+   *
+   * @param data A DataSet of LabeledVector (label, features) pairs
+   * @param currentWeights A DataSet with the current weights to be optimized as its only element
+   * @return A DataSet containing the weights after one stochastic gradient descent step
+   */
+  private def SGDStep(data: DataSet[LabeledVector], currentWeights: DataSet[WeightVector]):
+    DataSet[WeightVector] = {
+
+    // TODO: Sample from input to realize proper SGD
+    data.map {
+      new GradientCalculation
+    }.withBroadcastSet(currentWeights, WEIGHTVECTOR_BROADCAST).reduce {
+      (left, right) =>
+        val (leftGradientVector, leftCount) = left
+        val (rightGradientVector, rightCount) = right
+
+        BLAS.axpy(1.0, leftGradientVector.weights, rightGradientVector.weights)
+        (new WeightVector(
+          rightGradientVector.weights,
+          leftGradientVector.intercept + rightGradientVector.intercept),
+          leftCount + rightCount)
--- End diff --

Sorry, I could have been a little bit more verbose here. Well, MLRegression is not necessarily perfect ;-)

The constructor parameters of the WeightVector have the same indentation as the second tuple value, which makes the expression hard to parse. Increasing the indentation of the WeightVector parameters should make things clearer.
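Something like the following, for example (just a formatting sketch, no change to the logic):

    // With the constructor arguments indented one level deeper, the tuple
    // structure is visible at a glance: the count no longer lines up with
    // the WeightVector parameters.
    (new WeightVector(
        rightGradientVector.weights,
        leftGradientVector.intercept + rightGradientVector.intercept),
      leftCount + rightCount)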
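For reference, what the reduce computes boils down to the following self-contained sketch (simplified stand-in types, hypothetical helper names; the real code uses Flink's WeightVector and BLAS.axpy):

    // Simplified stand-in for org.apache.flink.ml.common.WeightVector.
    case class SimpleWeights(weights: Array[Double], intercept: Double)

    // Pairwise combine step, mirroring the reduce in SGDStep: sum the partial
    // gradients (the axpy call adds the left weights into the right weights)
    // and add up the element counts.
    def combine(left: (SimpleWeights, Int), right: (SimpleWeights, Int)): (SimpleWeights, Int) = {
      val (leftGrad, leftCount) = left
      val (rightGrad, rightCount) = right
      val summed = leftGrad.weights.zip(rightGrad.weights).map { case (a, b) => a + b }
      (SimpleWeights(summed, leftGrad.intercept + rightGrad.intercept), leftCount + rightCount)
    }

    // After the reduce, the summed gradient is divided by the total count to
    // obtain the average gradient used for the weight update.
    def average(sum: (SimpleWeights, Int)): SimpleWeights = {
      val (grad, count) = sum
      SimpleWeights(grad.weights.map(_ / count), grad.intercept / count)
    }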