#pragma once

#include "Solver.h"

namespace elsa
{
    /**
     * @brief Class implementing Nesterov's Fast Gradient Method.
     *
     * This class implements Nesterov's Fast Gradient Method. FGM is a first-order method to
     * efficiently optimize convex functions with Lipschitz-continuous gradients.
     *
     * @details
     * # Nesterov's fast gradient method algorithm overview #
     * The algorithm repeats the following update steps for \f$i = 0, \dots, N-1\f$
     * \f{align*}{
     * y_{i+1} &= x_i - \frac{1}{L} f'(x_i) \\
     * t_{i+1} &= \frac{1 + \sqrt{1 + 4 t^2_i}}{2} \\
     * x_{i+1} &= y_{i+1} + \frac{t_i - 1}{t_{i+1}}(y_{i+1} - y_i)
     * \f}
     * The inputs are \f$f \in C_{L}^{1, 1}(\mathbb{R}^d)\f$, \f$x_0 \in \mathbb{R}^d\f$,
     * \f$y_0 = x_0\f$, and \f$t_0 = 1\f$.
     *
     * The presented (and also implemented) version of the algorithm corresponds to _FGM1_ in the
     * referenced paper.
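     *
     * As an illustration only, one iteration of the scheme above could be sketched as follows
     * (the names `x`, `y`, `t`, `gradient` and the Lipschitz constant `L` are placeholders
     * introduced for this sketch, not members of this class):
     * @code
     * auto y_next = x - gradient(x) / L;                 // gradient step on x_i
     * auto t_next = (1 + std::sqrt(1 + 4 * t * t)) / 2;  // update of the momentum parameter t_i
     * x = y_next + ((t - 1) / t_next) * (y_next - y);    // extrapolation (momentum) step
     * y = y_next;
     * t = t_next;
     * @endcode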
     *
     * ## Convergence ##
     * Compared to standard gradient descent, which has a convergence rate of
     * \f$\mathcal{O}(\frac{1}{N})\f$, Nesterov's fast gradient method boosts the convergence
     * rate to \f$\mathcal{O}(\frac{1}{N^2})\f$.
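     *
     * For orientation, bounds of the form
     * \f$f(y_N) - f(x^\star) \leq \frac{2 L \lVert x_0 - x^\star \rVert^2}{(N+1)^2}\f$
     * are standard in the literature for \f$L\f$-smooth convex \f$f\f$ with minimizer
     * \f$x^\star\f$.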
     *
     * The current implementation uses no particular stopping rule; only a fixed number of
     * iterations is executed.
     *
     * ## FGM References ##
     * - Kim, D., Fessler, J.A. _Optimized first-order methods for smooth convex minimization_
     *   (2016) https://doi.org/10.1007/s10107-015-0949-3
     *
     * @tparam data_t data type for the domain and range of the problem, defaulting to real_t
     *
     * @see \verbatim embed:rst
     For a basic introduction and problem statement of first-order methods, see
     :ref:`here <elsa-first-order-methods-doc>` \endverbatim
     *
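     * Example usage (a minimal sketch; it assumes `problem` is an already set-up
     * `Problem<real_t>` and that the public solve interface of the `Solver` base class is used):
     * @code
     * FGM<real_t> fgm(problem);        // construct with the default epsilon
     * auto& solution = fgm.solve(20);  // run 20 iterations of FGM
     * @endcode
     *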
     * @author
     * - Michael Loipführer - initial code
     * - David Frank - Detailed Documentation
     */
    template <typename data_t = real_t>
    class FGM : public Solver<data_t>
    {
    public:
        /// Scalar alias
        using Scalar = typename Solver<data_t>::Scalar;

        /**
         * @brief Constructor for FGM, accepting an optimization problem and, optionally, a value
         * for epsilon
         *
         * @param[in] problem the problem that is supposed to be solved
         * @param[in] epsilon affects the stopping condition
         */
        FGM(const Problem<data_t>& problem,
            data_t epsilon = std::numeric_limits<data_t>::epsilon());

        /**
         * @brief Constructor for FGM, accepting an optimization problem, the inverse of a
         * preconditioner and, optionally, a value for epsilon
         *
         * @param[in] problem the problem that is supposed to be solved
         * @param[in] preconditionerInverse the inverse of the preconditioner
         * @param[in] epsilon affects the stopping condition
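         *
         * A hypothetical call, assuming `precondInv` is a `LinearOperator<real_t>` that applies
         * the inverse of a suitable preconditioner:
         * @code
         * FGM<real_t> fgm(problem, precondInv);
         * @endcode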
         */
        FGM(const Problem<data_t>& problem, const LinearOperator<data_t>& preconditionerInverse,
            data_t epsilon = std::numeric_limits<data_t>::epsilon());

        /// make copy constructor deletion explicit
        FGM(const FGM<data_t>&) = delete;

        /// default destructor
        ~FGM() override = default;

        /// lift the base class method getCurrentSolution
        using Solver<data_t>::getCurrentSolution;

    private:
        /// the default number of iterations
        const index_t _defaultIterations{100};

        /// variable affecting the stopping condition
        data_t _epsilon;

        /// the inverse of the preconditioner (if supplied)
        std::unique_ptr<LinearOperator<data_t>> _preconditionerInverse{};

        /// lift the base class variable _problem
        using Solver<data_t>::_problem;

        /**
         * @brief Solve the optimization problem, i.e. apply the given number of iterations of
         * Nesterov's fast gradient method
         *
         * @param[in] iterations number of iterations to execute (the default value 0 executes
         * _defaultIterations iterations)
         *
         * @returns a reference to the current solution
         */
        DataContainer<data_t>& solveImpl(index_t iterations) override;

        /// implement the polymorphic clone operation
        FGM<data_t>* cloneImpl() const override;

        /// implement the polymorphic comparison operation
        bool isEqual(const Solver<data_t>& other) const override;
    };
} // namespace elsa
