GCC Code Coverage Report


Directory: ./
File: include/crocoddyl/core/solver-base.hpp
Date: 2025-05-13 10:30:51
           Exec  Total  Coverage
Lines:        0      2      0.0%
Branches:     0      0        -%

Line Branch Exec Source
1 ///////////////////////////////////////////////////////////////////////////////
2 // BSD 3-Clause License
3 //
4 // Copyright (C) 2019-2025, LAAS-CNRS, University of Edinburgh,
5 // Heriot-Watt University
6 // Copyright note valid unless otherwise stated in individual files.
7 // All rights reserved.
8 ///////////////////////////////////////////////////////////////////////////////
9
10 #ifndef CROCODDYL_CORE_SOLVER_BASE_HPP_
11 #define CROCODDYL_CORE_SOLVER_BASE_HPP_
12
13 #include "crocoddyl/core/fwd.hpp"
14 #include "crocoddyl/core/optctrl/shooting.hpp"
15 #include "crocoddyl/core/utils/stop-watch.hpp"
16
17 namespace crocoddyl {
18
19 class CallbackAbstract; // forward declaration
20 static std::vector<Eigen::VectorXd> DEFAULT_VECTOR;
21
22 enum FeasibilityNorm { LInf = 0, L1 };
23
24 /**
25 * @brief Abstract class for optimal control solvers
26 *
27 * A solver resolves an optimal control problem of the form
28 * \f{eqnarray*}{
29 * \begin{Bmatrix}
30 * \mathbf{x}^*_0,\cdots,\mathbf{x}^*_{T} \\
31 * \mathbf{u}^*_0,\cdots,\mathbf{u}^*_{T-1}
32 * \end{Bmatrix} =
33 * \arg\min_{\mathbf{x}_s,\mathbf{u}_s} && l_T (\mathbf{x}_T) + \sum_{k=0}^{T-1}
34 * l_k(\mathbf{x}_k,\mathbf{u}_k) \\
35 * \operatorname{subject}\,\operatorname{to} && \mathbf{x}_0 =
36 * \mathbf{\tilde{x}}_0\\
37 * && \mathbf{x}_{k+1} = \mathbf{f}_k(\mathbf{x}_k,\mathbf{u}_k)\\
38 * && \mathbf{x}_k\in\mathcal{X}, \mathbf{u}_k\in\mathcal{U}
39 * \f}
40 * where \f$l_T(\mathbf{x}_T)\f$, \f$l_k(\mathbf{x}_k,\mathbf{u}_k)\f$ are the
41 * terminal and running cost functions, respectively,
42 * \f$\mathbf{f}_k(\mathbf{x}_k,\mathbf{u}_k)\f$ describes the evolution of the
43 * system, and state and control admissible sets are defined by
44 * \f$\mathbf{x}_k\in\mathcal{X}\f$, \f$\mathbf{u}_k\in\mathcal{U}\f$. An action
45 * model, defined in the shooting problem, describes each node \f$k\f$. Inside
46 * the action model, we specialize the cost functions, the system evolution and
47 * the admissible sets.
48 *
49 * The main routines are `computeDirection()` and `tryStep()`. The former finds
50 * a search direction and typically computes the derivatives of each action
51 * model. The latter rolls out the dynamics and cost (i.e., the action) to try
52 * the search direction found by `computeDirection()`. Both functions use the
53 * current guess defined by `setCandidate()`. Finally, the `solve()` function
54 * defines when the search direction and length are computed at each iterate. It
55 * also describes the globalization strategy (i.e., regularization) of the
56 * numerical optimization.
57 *
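 * As an illustration only (not part of this header), a hedged sketch of one
 * iterate, assuming `solver` is an instance of a concrete solver derived from
 * `SolverAbstract` and `xs_guess`, `us_guess` hold \f$T+1\f$ and \f$T\f$
 * Eigen vectors:
 * @code
 * solver.setCandidate(xs_guess, us_guess, false);  // define the current guess
 * solver.computeDirection(true);         // derivatives + search direction
 * const double dV = solver.tryStep(1.);  // roll out a full-length step
 * if (solver.stoppingCriteria() < solver.get_th_stop()) {
 *   // the solver can be considered converged for this guess
 * }
 * @endcode
 *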
58 * \sa `solve()`, `computeDirection()`, `tryStep()`, `stoppingCriteria()`
59 */
60 class SolverAbstract {
61 public:
62 EIGEN_MAKE_ALIGNED_OPERATOR_NEW
63
64 /**
65 * @brief Initialize the solver
66 *
67 * @param[in] problem shooting problem
68 */
69 explicit SolverAbstract(std::shared_ptr<ShootingProblem> problem);
70 virtual ~SolverAbstract();
71
72 /**
73 * @brief Compute the optimal trajectory \f$\mathbf{x}^*_s,\mathbf{u}^*_s\f$
74 * as lists of \f$T+1\f$ and \f$T\f$ terms
75 *
76 * From an initial guess \p init_xs, \p init_us (feasible or not), it iterates
77 * over `computeDirection()` and `tryStep()` until `stoppingCriteria()` is
78 * below the threshold. It also describes the globalization strategy used during
79 * the numerical optimization.
80 *
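 * For illustration only, a hedged warm-start call, assuming `solver` is a
 * concrete solver and `xs_guess`, `us_guess` are user-provided trajectories:
 * @code
 * const bool converged = solver.solve(xs_guess, us_guess, 50, false);
 * @endcode
 *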
81 * @param[in] init_xs initial guess for state trajectory with \f$T+1\f$
82 * elements (default [])
83 * @param[in] init_us initial guess for control trajectory with \f$T\f$
84 * elements (default [])
85 * @param[in] maxiter maximum allowed number of iterations (default 100)
86 * @param[in] is_feasible true if the \p init_xs are obtained from
87 * integrating the \p init_us (rollout) (default false)
88 * @param[in] reg_init initial guess for the regularization value. Very
89 * low values are typically used with very good guess points (default 1e-9).
90 * @return A boolean that describes if convergence was reached.
91 */
92 virtual bool solve(
93 const std::vector<Eigen::VectorXd>& init_xs = DEFAULT_VECTOR,
94 const std::vector<Eigen::VectorXd>& init_us = DEFAULT_VECTOR,
95 const std::size_t maxiter = 100, const bool is_feasible = false,
96 const double reg_init = NAN) = 0;
97
98 /**
99 * @brief Compute the search direction
100 * \f$(\delta\mathbf{x}^k,\delta\mathbf{u}^k)\f$ for the current guess
101 * \f$(\mathbf{x}^k_s,\mathbf{u}^k_s)\f$.
102 *
103 * You must call `setCandidate()` first in order to define the current guess.
104 * A current guess defines a state and control trajectory
105 * \f$(\mathbf{x}^k_s,\mathbf{u}^k_s)\f$ of \f$T+1\f$ and \f$T\f$ elements,
106 * respectively.
107 *
108 * @param[in] recalc true for recalculating the derivatives at current state
109 * and control
110 * @return The search direction \f$(\delta\mathbf{x},\delta\mathbf{u})\f$ and
111 * the dual lambdas as lists of \f$T+1\f$, \f$T\f$ and \f$T+1\f$ lengths,
112 * respectively
113 */
114 virtual void computeDirection(const bool recalc) = 0;
115
116 /**
117 * @brief Try a predefined step length \f$\alpha\f$ and compute its cost
118 * improvement \f$dV\f$.
119 *
120 * It uses the search direction found by `computeDirection()` to try a given
121 * step length \f$\alpha\f$. Therefore, it assumes that we have run
122 * `computeDirection()` first. Additionally, it returns the cost improvement
123 * \f$dV\f$ along the predefined step length \f$\alpha\f$.
124 *
125 * @param[in] steplength applied step length (\f$0\leq\alpha\leq1\f$)
126 * @return the cost improvement
127 */
128 virtual double tryStep(const double steplength = 1) = 0;
129
130 /**
131 * @brief Return a positive value that quantifies the algorithm termination
132 *
133 * This value typically represents the gradient norm, which tells us whether a
134 * local minimum has been reached. Strictly speaking, the stopping criterion
135 * depends on the search direction (calculated by `computeDirection()`), but
136 * it could also depend on the chosen step length, tested by `tryStep()`.
137 */
138 virtual double stoppingCriteria() = 0;
139
140 /**
141 * @brief Return the expected improvement \f$dV_{exp}\f$ from a given current
142 * search direction \f$(\delta\mathbf{x}^k,\delta\mathbf{u}^k)\f$
143 *
144 * For computing the expected improvement, you need to compute the search
145 * direction first via `computeDirection()`.
146 */
147 virtual const Eigen::Vector2d& expectedImprovement() = 0;
148
149 /**
150 * @brief Resize the solver data
151 *
152 * If the shooting problem has changed after construction, then this function
153 * resizes all the data before re-solving the problem.
154 */
155 virtual void resizeData();
156
157 /**
158 * @brief Compute the dynamic feasibility
159 * \f$\|\mathbf{f}_{\mathbf{s}}\|_{\infty,1}\f$ for the current guess
160 * \f$(\mathbf{x}^k,\mathbf{u}^k)\f$
161 *
162 * The feasibility can be computed using different norms (e.g.,
163 * \f$\ell_\infty\f$ or \f$\ell_1\f$ norms). By default we use the
164 * \f$\ell_\infty\f$ norm, however, we can change the type of norm using
165 * `set_feasnorm`. Note that \f$\mathbf{f}_{\mathbf{s}}\f$ are the gaps on the
166 * dynamics, which are computed at each node as
167 * \f$\mathbf{x}^{'}-\mathbf{f}(\mathbf{x},\mathbf{u})\f$.
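 *
 * For example (a hedged sketch, assuming `solver` is a concrete solver), the
 * norm can be switched through the `FeasibilityNorm` enum declared above:
 * @code
 * solver.set_feasnorm(crocoddyl::L1);  // switch from the default l-inf norm
 * const double ffeas = solver.computeDynamicFeasibility();
 * @endcode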
168 */
169 double computeDynamicFeasibility();
170
171 /**
172 * @brief Compute the feasibility of the inequality constraints for the
173 * current guess
174 *
175 * The feasibility can be computed using different norms (e.g.,
176 * \f$\ell_\infty\f$ or \f$\ell_1\f$ norms). By default we use the
177 * \f$\ell_\infty\f$ norm, however, we can change the type of norm using
178 * `set_feasnorm`.
179 */
180 double computeInequalityFeasibility();
181
182 /**
183 * @brief Compute the feasibility of the equality constraints for the current
184 * guess
185 *
186 * The feasibility can be computed using different norms (e.g.,
187 * \f$\ell_\infty\f$ or \f$\ell_1\f$ norms). By default we use the
188 * \f$\ell_\infty\f$ norm, however, we can change the type of norm using
189 * `set_feasnorm`.
190 */
191 double computeEqualityFeasibility();
192
193 /**
194 * @brief Set the solver candidate trajectories
195 * \f$(\mathbf{x}_s,\mathbf{u}_s)\f$
196 *
197 * The solver candidates are defined as state and control trajectories
198 * \f$(\mathbf{x}_s,\mathbf{u}_s)\f$ of \f$T+1\f$ and \f$T\f$ elements,
199 * respectively. Additionally, we need to define the dynamic feasibility of
200 * the \f$(\mathbf{x}_s,\mathbf{u}_s)\f$ pair. Note that the trajectories are
201 * feasible if \f$\mathbf{x}_s\f$ is the resulting trajectory from the system
202 * rollout with \f$\mathbf{u}_s\f$ inputs.
203 *
204 * @param[in] xs_warm state trajectory of \f$T+1\f$ elements (default [])
205 * @param[in] us_warm control trajectory of \f$T\f$ elements (default [])
206 * @param[in] is_feasible true if the \p xs_warm are obtained from integrating
207 * the \p us_warm (rollout)
208 */
209 void setCandidate(
210 const std::vector<Eigen::VectorXd>& xs_warm = DEFAULT_VECTOR,
211 const std::vector<Eigen::VectorXd>& us_warm = DEFAULT_VECTOR,
212 const bool is_feasible = false);
213
214 /**
215 * @brief Set a list of callback functions used for solver diagnostics
216 *
217 * At each iteration, the solver calls this set of functions in order to allow
218 * the user to diagnose its performance.
219 *
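 * A hedged example, assuming the verbose callback provided by crocoddyl in
 * `crocoddyl/core/utils/callbacks.hpp` and a concrete `solver` instance:
 * @code
 * std::vector<std::shared_ptr<crocoddyl::CallbackAbstract> > cbs;
 * cbs.push_back(std::make_shared<crocoddyl::CallbackVerbose>());
 * solver.setCallbacks(cbs);  // diagnostics are reported at every iteration
 * @endcode
 *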
220 * @param callbacks set of callback functions
221 */
222 void setCallbacks(
223 const std::vector<std::shared_ptr<CallbackAbstract> >& callbacks);
224
225 /**
226 * @brief Return the list of callback functions used for diagnostics
227 */
228 const std::vector<std::shared_ptr<CallbackAbstract> >& getCallbacks() const;
229
230 /**
231 * @brief Return the shooting problem
232 */
233 const std::shared_ptr<ShootingProblem>& get_problem() const;
234
235 /**
236 * @brief Return the state trajectory \f$\mathbf{x}_s\f$
237 */
238 const std::vector<Eigen::VectorXd>& get_xs() const;
239
240 /**
241 * @brief Return the control trajectory \f$\mathbf{u}_s\f$
242 */
243 const std::vector<Eigen::VectorXd>& get_us() const;
244
245 /**
246 * @brief Return the dynamic infeasibility \f$\mathbf{f}_{s}\f$
247 */
248 const std::vector<Eigen::VectorXd>& get_fs() const;
249
250 /**
251 * @brief Return the feasibility status of the
252 * \f$(\mathbf{x}_s,\mathbf{u}_s)\f$ trajectory
253 */
254 bool get_is_feasible() const;
255
256 /**
257 * @brief Return the cost for the current guess
258 */
259 double get_cost() const;
260
261 /**
262 * @brief Return the merit for the current guess
263 */
264 double get_merit() const;
265
266 /**
267 * @brief Return the stopping-criteria value computed by `stoppingCriteria()`
268 */
269 double get_stop() const;
270
271 /**
272 * @brief Return the linear and quadratic terms of the expected improvement
273 */
274 const Eigen::Vector2d& get_d() const;
275
276 /**
277 * @brief Return the reduction in the cost function \f$\Delta V\f$
278 */
279 double get_dV() const;
280
281 /**
282 * @brief Return the reduction in the merit function \f$\Delta\Phi\f$
283 */
284 double get_dPhi() const;
285
286 /**
287 * @brief Return the expected reduction in the cost function \f$\Delta
288 * V_{exp}\f$
289 */
290 double get_dVexp() const;
291
292 /**
293 * @brief Return the expected reduction in the merit function
294 * \f$\Delta\Phi_{exp}\f$
295 */
296 double get_dPhiexp() const;
297
298 /**
299 * @brief Return the reduction in the feasibility
300 */
301 double get_dfeas() const;
302
303 /**
304 * @brief Return the total feasibility for the current guess
305 */
306 double get_feas() const;
307
308 /**
309 * @brief Return the dynamic feasibility for the current guess
310 */
311 double get_ffeas() const;
312
313 /**
314 * @brief Return the inequality feasibility for the current guess
315 */
316 double get_gfeas() const;
317
318 /**
319 * @brief Return the equality feasibility for the current guess
320 */
321 double get_hfeas() const;
322
323 /**
324 * @brief Return the dynamic feasibility for the current step length
325 */
326 double get_ffeas_try() const;
327
328 /**
329 * @brief Return the inequality feasibility for the current step length
330 */
331 double get_gfeas_try() const;
332
333 /**
334 * @brief Return the equality feasibility for the current step length
335 */
336 double get_hfeas_try() const;
337
338 /**
339 * @brief Return the primal-variable regularization
340 */
341 double get_preg() const;
342
343 /**
344 * @brief Return the dual-variable regularization
345 */
346 double get_dreg() const;
347
348 DEPRECATED("Use get_preg for primal-variable regularization",
349 double get_xreg() const;)
350 DEPRECATED("Use get_preg for primal-variable regularization",
351 double get_ureg() const;)
352
353 /**
354 * @brief Return the step length \f$\alpha\f$
355 */
356 double get_steplength() const;
357
358 /**
359 * @brief Return the threshold used for accepting a step
360 */
361 double get_th_acceptstep() const;
362
363 /**
364 * @brief Return the tolerance for stopping the algorithm
365 */
366 double get_th_stop() const;
367
368 /**
369 * @brief Return the threshold for accepting a gap as non-zero
370 */
371 double get_th_gaptol() const;
372
373 /**
374 * @brief Return the type of norm used to evaluate the dynamic and constraint
375 * feasibility
376 */
377 FeasibilityNorm get_feasnorm() const;
378
379 /**
380 * @brief Return the number of iterations performed by the solver
381 */
382 std::size_t get_iter() const;
383
384 /**
385 * @brief Modify the state trajectory \f$\mathbf{x}_s\f$
386 */
387 void set_xs(const std::vector<Eigen::VectorXd>& xs);
388
389 /**
390 * @brief Modify the control trajectory \f$\mathbf{u}_s\f$
391 */
392 void set_us(const std::vector<Eigen::VectorXd>& us);
393
394 /**
395 * @brief Modify the primal-variable regularization value
396 */
397 void set_preg(const double preg);
398
399 /**
400 * @brief Modify the dual-variable regularization value
401 */
402 void set_dreg(const double dreg);
403
404 DEPRECATED("Use set_preg for primal-variable regularization",
405 void set_xreg(const double xreg);)
406 DEPRECATED("Use set_preg for primal-variable regularization",
407 void set_ureg(const double ureg);)
408
409 /**
410 * @brief Modify the threshold used for accepting a step
411 */
412 void set_th_acceptstep(const double th_acceptstep);
413
414 /**
415 * @brief Modify the tolerance for stopping the algorithm
416 */
417 void set_th_stop(const double th_stop);
418
419 /**
420 * @brief Modify the threshold for accepting a gap as non-zero
421 */
422 void set_th_gaptol(const double th_gaptol);
423
424 /**
425 * @brief Modify the current norm used for computing the dynamic and constraint
426 * feasibility
427 */
428 void set_feasnorm(const FeasibilityNorm feas_norm);
429
430 protected:
431 std::shared_ptr<ShootingProblem> problem_; //!< optimal control problem
432 std::vector<Eigen::VectorXd> xs_; //!< State trajectory
433 std::vector<Eigen::VectorXd> us_; //!< Control trajectory
434 std::vector<Eigen::VectorXd> fs_; //!< Gaps/defects between shooting nodes
435 std::vector<std::shared_ptr<CallbackAbstract> >
436 callbacks_; //!< Callback functions
437 bool is_feasible_;   //!< Label that indicates if the iteration is feasible
438 bool was_feasible_;  //!< Label that indicates if the previous iterate was
439 //!< feasible
440 double cost_; //!< Cost for the current guess
441 double merit_; //!< Merit for the current guess
442 double stop_; //!< Value computed by `stoppingCriteria()`
443 Eigen::Vector2d d_; //!< LQ approximation of the expected improvement
444 double dV_; //!< Reduction in the cost function computed by `tryStep()`
445 double dPhi_; //!< Reduction in the merit function computed by `tryStep()`
446 double dVexp_; //!< Expected reduction in the cost function
447 double dPhiexp_; //!< Expected reduction in the merit function
448 double dfeas_; //!< Reduction in the feasibility
449 double feas_; //!< Total feasibility for the current guess
450 double
451 ffeas_; //!< Feasibility of the dynamic constraints for the current guess
452 double gfeas_; //!< Feasibility of the inequality constraints for the current
453 //!< guess
454 double hfeas_; //!< Feasibility of the equality constraints for the current
455 //!< guess
456 double ffeas_try_; //!< Feasibility of the dynamic constraints evaluated for
457 //!< the current step length
458 double gfeas_try_; //!< Feasibility of the inequality constraints evaluated
459 //!< for the current step length
460 double hfeas_try_; //!< Feasibility of the equality constraints evaluated for
461 //!< the current step length
462 double preg_; //!< Current primal-variable regularization value
463 double dreg_; //!< Current dual-variable regularization value
464 DEPRECATED("Use preg_ for primal-variable regularization",
465 double xreg_;) //!< Current state regularization value
466 DEPRECATED("Use preg_ for primal-variable regularization",
467 double ureg_;) //!< Current control regularization values
468 double steplength_; //!< Current applied step length
469 double th_acceptstep_; //!< Threshold used for accepting step
470 double th_stop_; //!< Tolerance for stopping the algorithm
471 double th_gaptol_; //!< Threshold limit to check non-zero gaps
472 enum FeasibilityNorm feasnorm_; //!< Type of norm used to evaluate the
473 //!< dynamics and constraints feasibility
474 std::size_t iter_;  //!< Number of iterations performed by the solver
475 double tmp_feas_;  //!< Temporary variable used for computing the feasibility
476 std::vector<Eigen::VectorXd> g_adj_; //!< Adjusted inequality bound
477 };
478
479 /**
480 * @brief Abstract class for solver callbacks
481 *
482 * A callback is used to diagnose the behaviour of our solver at each
483 * iteration. For instance, it can be used to print values, record data or
484 * display motions.
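 *
 * As a sketch, a user-defined callback could look like the following (the
 * `CallbackCostLogger` name is purely illustrative):
 * @code
 * #include <iostream>
 *
 * class CallbackCostLogger : public crocoddyl::CallbackAbstract {
 *  public:
 *   void operator()(crocoddyl::SolverAbstract& solver) override {
 *     std::cout << "iter " << solver.get_iter() << ": cost " << solver.get_cost()
 *               << ", feasibility " << solver.get_feas() << std::endl;
 *   }
 * };
 * @endcode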
485 */
486 class CallbackAbstract {
487 public:
488 /**
489 * @brief Initialize the callback function
490 */
491 CallbackAbstract() {}
492 virtual ~CallbackAbstract() {}
493
494 /**
495 * @brief Run the callback function given a solver
496 *
497 * @param[in] solver solver to be diagnosed
498 */
499 virtual void operator()(SolverAbstract& solver) = 0;
500 };
501
502 bool raiseIfNaN(const double value);
503
504 } // namespace crocoddyl
505
506 #endif // CROCODDYL_CORE_SOLVER_BASE_HPP_
507