GitHub user tillrohrmann commented on a diff in the pull request:

    https://github.com/apache/flink/pull/692#discussion_r30780000

--- Diff: flink-staging/flink-ml/src/main/scala/org/apache/flink/ml/optimization/GradientDescent.scala ---

@@ -76,86 +77,163 @@ class GradientDescent(runParameters: ParameterMap) extends IterativeSolver {
     }.withBroadcastSet(currentWeights, WEIGHTVECTOR_BROADCAST)
   }

+
   /** Provides a solution for the given optimization problem
     *
     * @param data A Dataset of LabeledVector (label, features) pairs
-    * @param initWeights The initial weights that will be optimized
+    * @param initialWeights The initial weights that will be optimized
     * @return The weights, optimized for the provided data.
     */
   override def optimize(
     data: DataSet[LabeledVector],
-    initWeights: Option[DataSet[WeightVector]]): DataSet[WeightVector] = {
-    // TODO: Faster way to do this?
-    val dimensionsDS = data.map(_.vector.size).reduce((a, b) => b)
-
-    val numberOfIterations: Int = parameterMap(Iterations)
+    initialWeights: Option[DataSet[WeightVector]]): DataSet[WeightVector] = {
+    val numberOfIterations: Int = parameters(Iterations)
+    // TODO(tvas): This looks out of place, why don't we get back an Option from
+    // parameters(ConvergenceThreshold)?

--- End diff --

The `apply` method unpacks the option value, whereas the `get` method returns an `Option`. Maybe we should rename it to `getOption`.
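For context, here is a minimal sketch of the `apply`/`get` distinction being discussed. The `Parameter` and `ParameterMap` shapes below are simplified illustrations under my own assumptions, not Flink's actual classes, and `getOption` is the proposed rename:

```scala
// Simplified sketch of the apply/get distinction discussed above.
// This is NOT Flink's actual ParameterMap; it only illustrates the API shape.
class Parameter[T](val name: String)

class ParameterMap(private val map: Map[Parameter[_], Any] = Map.empty) {

  def add[T](parameter: Parameter[T], value: T): ParameterMap =
    new ParameterMap(map + (parameter -> value))

  // `apply` unpacks the underlying Option and fails fast if the
  // parameter was never set.
  def apply[T](parameter: Parameter[T]): T =
    map.get(parameter) match {
      case Some(value) => value.asInstanceOf[T]
      case None =>
        throw new NoSuchElementException(s"Parameter ${parameter.name} is not set")
    }

  // `get` returns the Option itself; renaming it to `getOption` would
  // make the difference from `apply` explicit at the call site.
  def getOption[T](parameter: Parameter[T]): Option[T] =
    map.get(parameter).map(_.asInstanceOf[T])
}

object Example extends App {
  val Iterations = new Parameter[Int]("iterations")
  val ConvergenceThreshold = new Parameter[Double]("convergenceThreshold")

  val parameters = new ParameterMap().add(Iterations, 100)

  val numberOfIterations: Int = parameters(Iterations)  // unpacked value: 100
  val threshold: Option[Double] =
    parameters.getOption(ConvergenceThreshold)          // None, parameter unset
  println(s"iterations=$numberOfIterations, threshold=$threshold")
}
```

With the rename, `parameters(Iterations)` reads as "give me the value or fail", while `parameters.getOption(ConvergenceThreshold)` signals that absence is expected, which would address the TODO in the diff.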