GCC Code Coverage Report


Directory: ./
File: include/crocoddyl/core/solvers/ipopt/ipopt-iface.hpp
Date: 2025-01-16 08:47:40
Exec Total Coverage
Lines: 38 57 66.7%
Branches: 35 70 50.0%

Line Branch Exec Source
1 ///////////////////////////////////////////////////////////////////////////////
2 // BSD 3-Clause License
3 //
4 // Copyright (C) 2022-2023, IRI: CSIC-UPC, Heriot-Watt University
5 // Copyright note valid unless otherwise stated in individual files.
6 // All rights reserved.
7 ///////////////////////////////////////////////////////////////////////////////
8
9 #ifndef __CROCODDYL_CORE_SOLVERS_IPOPT_IPOPT_IFACE_HPP__
10 #define __CROCODDYL_CORE_SOLVERS_IPOPT_IPOPT_IFACE_HPP__
11
12 #define HAVE_CSTDDEF
13 #include <IpTNLP.hpp>
14 #undef HAVE_CSTDDEF
15
16 #include "crocoddyl/core/mathbase.hpp"
17 #include "crocoddyl/core/optctrl/shooting.hpp"
18
19 namespace crocoddyl {
20
21 struct IpoptInterfaceData;
22
23 /**
24 * @brief Class for interfacing a crocoddyl::ShootingProblem with IPOPT
25 *
26 * This class implements the pure virtual functions from Ipopt::TNLP to solve
27 * the optimal control problem in `problem_` using a multiple shooting approach.
28 *
29 * Ipopt considers its decision variables `x` to belong to the Euclidean space.
30 * However, Crocoddyl states could lie in a manifold. To ensure that the
31 * solution of Ipopt lies in the manifold of the state, we perform the
32 * optimization in the tangent space of a given initial state. Finally we
33 * retract the Ipopt solution to the manifold. That is:
34 * * \f[
35 * \begin{aligned}
36 * \mathbf{x}^* = \mathbf{x}^0 \oplus \mathbf{\Delta x}^*
37 * \end{aligned}
38 * \f]
39 *
40 * where \f$\mathbf{x}^*\f$ is the final solution, \f$\mathbf{x}^0\f$ is the
41 * initial guess and \f$\mathbf{\Delta x}^*\f$ is the Ipopt solution in the
42 * tangent space of \f$\mathbf{x}_0\f$. Due to this procedure, the computation
43 * of the cost function, the dynamic constraint as well as their corresponding
44 * derivatives should be properly modified.
45 *
46 * The Ipopt decision vector is built as follows: \f$x = [ \mathbf{\Delta
47 * x}_0^\top, \mathbf{u}_0^\top, \mathbf{\Delta x}_1^\top, \mathbf{u}_1^\top,
48 * \dots, \mathbf{\Delta x}_N^\top ]\f$
49 *
50 * Dynamic constraints are posed as: \f$(\mathbf{x}^0_{k+1} \oplus
51 * \mathbf{\Delta x}_{k+1}) \ominus \mathbf{f}(\mathbf{x}_{k}^0 \oplus
52 * \mathbf{\Delta x}_{k}, \mathbf{u}_k) = \mathbf{0}\f$
53 *
54 * Initial condition: \f$ \mathbf{x}(0) \ominus (\mathbf{x}_{k}^0 \oplus
55 * \mathbf{\Delta x}_{k}) = \mathbf{0}\f$
56 *
57 * Documentation of the methods has been extracted from Ipopt::TNLP.hpp file
58 *
59 * \sa `get_nlp_info()`, `get_bounds_info()`, `eval_f()`, `eval_g()`,
60 * `eval_grad_f()`, `eval_jac_g()`, `eval_h()`
61 */
62
63 class IpoptInterface : public Ipopt::TNLP {
64 public:
65 EIGEN_MAKE_ALIGNED_OPERATOR_NEW
66
67 /**
68 * @brief Initialize the Ipopt interface
69 *
70 * @param[in] problem Crocoddyl shooting problem
71 */
72 IpoptInterface(const boost::shared_ptr<crocoddyl::ShootingProblem>& problem);
73
74 virtual ~IpoptInterface();
75
76 /**
77 * @brief Methods to gather information about the NLP
78 *
79 * %Ipopt uses this information when allocating the arrays that it will later
80 * ask you to fill with values. Be careful in this method since incorrect
81 * values will cause memory bugs which may be very difficult to find.
82 * @param[out] n Storage for the number of variables \f$x\f$
83 * @param[out] m Storage for the number of constraints \f$g(x)\f$
84 * @param[out] nnz_jac_g Storage for the number of nonzero entries in the
85 * Jacobian
86 * @param[out] nnz_h_lag Storage for the number of nonzero entries in the
87 * Hessian
88 * @param[out] index_style Storage for the index style the numbering style
89 * used for row/col entries in the sparse matrix format
90 */
91 virtual bool get_nlp_info(Ipopt::Index& n, Ipopt::Index& m,
92 Ipopt::Index& nnz_jac_g, Ipopt::Index& nnz_h_lag,
93 IndexStyleEnum& index_style);
94
95 /**
96 * @brief Method to request bounds on the variables and constraints.
 97    *
98 * @param[in] n Number of variables \f$x\f$ in the problem
99 * @param[out] x_l Lower bounds \f$x^L\f$ for the variables \f$x\f$
100 * @param[out] x_u Upper bounds \f$x^U\f$ for the variables \f$x\f$
101 * @param[in] m Number of constraints \f$g(x)\f$ in the problem
102 * @param[out] g_l Lower bounds \f$g^L\f$ for the constraints \f$g(x)\f$
103 * @param[out] g_u Upper bounds \f$g^U\f$ for the constraints \f$g(x)\f$
104 *
105 * @return true if success, false otherwise.
106 *
107 * The values of `n` and `m` that were specified in
108 IpoptInterface::get_nlp_info are passed
109 * here for debug checking. Setting a lower bound to a value less than or
110 * equal to the value of the option \ref OPT_nlp_lower_bound_inf
111 "nlp_lower_bound_inf"
112 * will cause %Ipopt to assume no lower bound. Likewise, specifying the upper
113 bound above or
114 * equal to the value of the option \ref OPT_nlp_upper_bound_inf
115 "nlp_upper_bound_inf"
116 * will cause %Ipopt to assume no upper bound. These options are set to
117 -10<sup>19</sup> and
118 * 10<sup>19</sup>, respectively, by default, but may be modified by changing
119 these
120 * options.
121 */
122 virtual bool get_bounds_info(Ipopt::Index n, Ipopt::Number* x_l,
123 Ipopt::Number* x_u, Ipopt::Index m,
124 Ipopt::Number* g_l, Ipopt::Number* g_u);
125
126 /**
127 * \brief Method to request the starting point before iterating.
128 *
129 * @param[in] n Number of variables \f$x\f$ in the problem; it
130 * will have the same value that was specified in
131 * `IpoptInterface::get_nlp_info`
132 * @param[in] init_x If true, this method must provide an initial value
133 * for \f$x\f$
134 * @param[out] x Initial values for the primal variables \f$x\f$
135 * @param[in] init_z If true, this method must provide an initial value
136 * for the bound multipliers \f$z^L\f$ and \f$z^U\f$
137 * @param[out] z_L Initial values for the bound multipliers \f$z^L\f$
138 * @param[out] z_U Initial values for the bound multipliers \f$z^U\f$
139 * @param[in] m Number of constraints \f$g(x)\f$ in the problem;
140 * it will have the same value that was specified in
141 * `IpoptInterface::get_nlp_info`
142 * @param[in] init_lambda If true, this method must provide an initial value
143 * for the constraint multipliers \f$\lambda\f$
144 * @param[out] lambda Initial values for the constraint multipliers,
145 * \f$\lambda\f$
146 *
147 * @return true if success, false otherwise.
148 *
149 * The boolean variables indicate whether the algorithm requires to have x,
150 * z_L/z_u, and lambda initialized, respectively. If, for some reason, the
151 * algorithm requires initializations that cannot be provided, false should be
152 * returned and %Ipopt will stop. The default options only require initial
153 * values for the primal variables \f$x\f$.
154 *
155 * Note, that the initial values for bound multiplier components for absent
156 * bounds (\f$x^L_i=-\infty\f$ or \f$x^U_i=\infty\f$) are ignored.
157 */
158 // [TNLP_get_starting_point]
159 virtual bool get_starting_point(Ipopt::Index n, bool init_x, Ipopt::Number* x,
160 bool init_z, Ipopt::Number* z_L,
161 Ipopt::Number* z_U, Ipopt::Index m,
162 bool init_lambda, Ipopt::Number* lambda);
163
164 /**
165 * @brief Method to request the value of the objective function.
166 *
167 * @param[in] n Number of variables \f$x\f$ in the problem; it will
168 * have the same value that was specified in `IpoptInterface::get_nlp_info`
169 * @param[in] x Values for the primal variables \f$x\f$ at which the
170 * objective function \f$f(x)\f$ is to be evaluated
171 * @param[in] new_x False if any evaluation method (`eval_*`) was
172 * previously called with the same values in x, true otherwise. This can be
173 * helpful when users have efficient implementations that calculate multiple
174 * outputs at once. %Ipopt internally caches results from the TNLP and
175 * generally, this flag can be ignored.
176 * @param[out] obj_value Storage for the value of the objective function
177 * \f$f(x)\f$
178 *
179 * @return true if success, false otherwise.
180 */
181 virtual bool eval_f(Ipopt::Index n, const Ipopt::Number* x, bool new_x,
182 Ipopt::Number& obj_value);
183
184 /**
185 * @brief Method to request the gradient of the objective function.
186 *
187 * @param[in] n Number of variables \f$x\f$ in the problem; it will
188 * have the same value that was specified in `IpoptInterface::get_nlp_info`
189 * @param[in] x Values for the primal variables \f$x\f$ at which the
190 * gradient \f$\nabla f(x)\f$ is to be evaluated
191 * @param[in] new_x False if any evaluation method (`eval_*`) was
192 * previously called with the same values in x, true otherwise; see also
193 * `IpoptInterface::eval_f`
194 * @param[out] grad_f Array to store values of the gradient of the objective
195 * function \f$\nabla f(x)\f$. The gradient array is in the same order as the
196 * \f$x\f$ variables (i.e., the gradient of the objective with respect to
197 * `x[2]` should be put in `grad_f[2]`).
198 *
199 * @return true if success, false otherwise.
200 */
201 virtual bool eval_grad_f(Ipopt::Index n, const Ipopt::Number* x, bool new_x,
202 Ipopt::Number* grad_f);
203
204 /**
205 * @brief Method to request the constraint values.
206 *
207 * @param[in] n Number of variables \f$x\f$ in the problem; it will have
208 * the same value that was specified in `IpoptInterface::get_nlp_info`
209 * @param[in] x Values for the primal variables \f$x\f$ at which the
210 * constraint functions \f$g(x)\f$ are to be evaluated
211 * @param[in] new_x False if any evaluation method (`eval_*`) was previously
212 * called with the same values in x, true otherwise; see also
213 * `IpoptInterface::eval_f`
214 * @param[in] m Number of constraints \f$g(x)\f$ in the problem; it will
215 * have the same value that was specified in `IpoptInterface::get_nlp_info`
216 * @param[out] g Array to store constraint function values \f$g(x)\f$, do
217 * not add or subtract the bound values \f$g^L\f$ or \f$g^U\f$.
218 *
219 * @return true if success, false otherwise.
220 */
221 virtual bool eval_g(Ipopt::Index n, const Ipopt::Number* x, bool new_x,
222 Ipopt::Index m, Ipopt::Number* g);
223
224 /**
225 * @brief Method to request either the sparsity structure or the values of the
226 * Jacobian of the constraints.
227 *
228 * The Jacobian is the matrix of derivatives where the derivative of
229 * constraint function \f$g_i\f$ with respect to variable \f$x_j\f$ is placed
230 * in row \f$i\f$ and column \f$j\f$. See \ref TRIPLET for a discussion of the
231 * sparse matrix format used in this method.
232 *
233 * @param[in] n Number of variables \f$x\f$ in the problem; it will
234 * have the same value that was specified in `IpoptInterface::get_nlp_info`
235 * @param[in] x First call: NULL; later calls: the values for the
236 * primal variables \f$x\f$ at which the constraint Jacobian \f$\nabla
237 * g(x)^T\f$ is to be evaluated
238 * @param[in] new_x False if any evaluation method (`eval_*`) was
239 * previously called with the same values in x, true otherwise; see also
240 * `IpoptInterface::eval_f`
241 * @param[in] m Number of constraints \f$g(x)\f$ in the problem; it
242 * will have the same value that was specified in
243 * `IpoptInterface::get_nlp_info`
244 * @param[in] nele_jac Number of nonzero elements in the Jacobian; it will
245 * have the same value that was specified in `IpoptInterface::get_nlp_info`
246 * @param[out] iRow First call: array of length `nele_jac` to store the
 247   * row indices of entries in the Jacobian of the constraints; later calls: NULL
248 * @param[out] jCol First call: array of length `nele_jac` to store the
 249   * column indices of entries in the Jacobian of the constraints; later calls:
250 * NULL
251 * @param[out] values First call: NULL; later calls: array of length
252 * nele_jac to store the values of the entries in the Jacobian of the
253 * constraints
254 *
255 * @return true if success, false otherwise.
256 *
257 * @note The arrays iRow and jCol only need to be filled once. If the iRow and
258 * jCol arguments are not NULL (first call to this function), then %Ipopt
259 * expects that the sparsity structure of the Jacobian (the row and column
260 * indices only) are written into iRow and jCol. At this call, the arguments
261 * `x` and `values` will be NULL. If the arguments `x` and `values` are not
262 * NULL, then %Ipopt expects that the value of the Jacobian as calculated from
263 * array `x` is stored in array `values` (using the same order as used when
264 * specifying the sparsity structure). At this call, the arguments `iRow` and
265 * `jCol` will be NULL.
266 */
267 virtual bool eval_jac_g(Ipopt::Index n, const Ipopt::Number* x, bool new_x,
268 Ipopt::Index m, Ipopt::Index nele_jac,
269 Ipopt::Index* iRow, Ipopt::Index* jCol,
270 Ipopt::Number* values);
271
272 /**
273 * @brief Method to request either the sparsity structure or the values of the
274 * Hessian of the Lagrangian.
275 *
276 * The Hessian matrix that %Ipopt uses is
277 * \f[ \sigma_f \nabla^2 f(x_k) + \sum_{i=1}^m\lambda_i\nabla^2 g_i(x_k) \f]
278 * for the given values for \f$x\f$, \f$\sigma_f\f$, and \f$\lambda\f$.
279 * See \ref TRIPLET for a discussion of the sparse matrix format used in this
280 * method.
281 *
282 * @param[in] n Number of variables \f$x\f$ in the problem; it will
283 * have the same value that was specified in `IpoptInterface::get_nlp_info`
284 * @param[in] x First call: NULL; later calls: the values for the
285 * primal variables \f$x\f$ at which the Hessian is to be evaluated
286 * @param[in] new_x False if any evaluation method (`eval_*`) was
287 * previously called with the same values in x, true otherwise; see also
288 * IpoptInterface::eval_f
289 * @param[in] obj_factor Factor \f$\sigma_f\f$ in front of the objective term
290 * in the Hessian
291 * @param[in] m Number of constraints \f$g(x)\f$ in the problem; it
292 * will have the same value that was specified in
293 * `IpoptInterface::get_nlp_info`
294 * @param[in] lambda Values for the constraint multipliers \f$\lambda\f$
295 * at which the Hessian is to be evaluated
296 * @param[in] new_lambda False if any evaluation method was previously called
297 * with the same values in lambda, true otherwise
298 * @param[in] nele_hess Number of nonzero elements in the Hessian; it will
299 * have the same value that was specified in `IpoptInterface::get_nlp_info`
300 * @param[out] iRow First call: array of length nele_hess to store the
301 * row indices of entries in the Hessian; later calls: NULL
302 * @param[out] jCol First call: array of length nele_hess to store the
303 * column indices of entries in the Hessian; later calls: NULL
304 * @param[out] values First call: NULL; later calls: array of length
305 * nele_hess to store the values of the entries in the Hessian
306 *
307 * @return true if success, false otherwise.
308 *
309 * @note The arrays iRow and jCol only need to be filled once. If the iRow and
310 * jCol arguments are not NULL (first call to this function), then %Ipopt
311 * expects that the sparsity structure of the Hessian (the row and column
312 * indices only) are written into iRow and jCol. At this call, the arguments
313 * `x`, `lambda`, and `values` will be NULL. If the arguments `x`, `lambda`,
314 * and `values` are not NULL, then %Ipopt expects that the value of the
315 * Hessian as calculated from arrays `x` and `lambda` are stored in array
316 * `values` (using the same order as used when specifying the sparsity
317 * structure). At this call, the arguments `iRow` and `jCol` will be NULL.
318 *
319 * @attention As this matrix is symmetric, %Ipopt expects that only the lower
320 * diagonal entries are specified.
321 *
322 * A default implementation is provided, in case the user wants to set
 323   * quasi-Newton approximations to estimate the second derivatives and
 324   * doesn't need to implement this method.
325 */
326 virtual bool eval_h(Ipopt::Index n, const Ipopt::Number* x, bool new_x,
327 Ipopt::Number obj_factor, Ipopt::Index m,
328 const Ipopt::Number* lambda, bool new_lambda,
329 Ipopt::Index nele_hess, Ipopt::Index* iRow,
330 Ipopt::Index* jCol, Ipopt::Number* values);
331
332 /**
333 * @brief This method is called when the algorithm has finished (successfully
334 * or not) so the TNLP can digest the outcome, e.g., store/write the solution,
335 * if any.
336 *
337 * @param[in] status @parblock gives the status of the algorithm
338 * - SUCCESS: Algorithm terminated successfully at a locally optimal
339 * point, satisfying the convergence tolerances (can be specified
340 * by options).
341 * - MAXITER_EXCEEDED: Maximum number of iterations exceeded (can be
342 * specified by an option).
343 * - CPUTIME_EXCEEDED: Maximum number of CPU seconds exceeded (can be
344 * specified by an option).
345 * - STOP_AT_TINY_STEP: Algorithm proceeds with very little progress.
346 * - STOP_AT_ACCEPTABLE_POINT: Algorithm stopped at a point that was
347 * converged, not to "desired" tolerances, but to "acceptable" tolerances (see
348 * the acceptable-... options).
349 * - LOCAL_INFEASIBILITY: Algorithm converged to a point of local
350 * infeasibility. Problem may be infeasible.
351 * - USER_REQUESTED_STOP: The user call-back function
352 * IpoptInterface::intermediate_callback returned false, i.e., the user code
353 * requested a premature termination of the optimization.
354 * - DIVERGING_ITERATES: It seems that the iterates diverge.
355 * - RESTORATION_FAILURE: Restoration phase failed, algorithm doesn't know
356 * how to proceed.
357 * - ERROR_IN_STEP_COMPUTATION: An unrecoverable error occurred while %Ipopt
358 * tried to compute the search direction.
359 * - INVALID_NUMBER_DETECTED: Algorithm received an invalid number (such as
360 * NaN or Inf) from the NLP; see also option check_derivatives_for_nan_inf).
361 * - INTERNAL_ERROR: An unknown internal error occurred.
362 * @endparblock
363 * @param[in] n Number of variables \f$x\f$ in the problem; it will
364 * have the same value that was specified in `IpoptInterface::get_nlp_info`
365 * @param[in] x Final values for the primal variables
366 * @param[in] z_L Final values for the lower bound multipliers
367 * @param[in] z_U Final values for the upper bound multipliers
368 * @param[in] m Number of constraints \f$g(x)\f$ in the problem; it
369 * will have the same value that was specified in
370 * `IpoptInterface::get_nlp_info`
371 * @param[in] g Final values of the constraint functions
372 * @param[in] lambda Final values of the constraint multipliers
373 * @param[in] obj_value Final value of the objective function
374 * @param[in] ip_data Provided for expert users
375 * @param[in] ip_cq Provided for expert users
376 */
377 virtual void finalize_solution(
378 Ipopt::SolverReturn status, Ipopt::Index n, const Ipopt::Number* x,
379 const Ipopt::Number* z_L, const Ipopt::Number* z_U, Ipopt::Index m,
380 const Ipopt::Number* g, const Ipopt::Number* lambda,
381 Ipopt::Number obj_value, const Ipopt::IpoptData* ip_data,
382 Ipopt::IpoptCalculatedQuantities* ip_cq);
383
384 /**
385 * @brief Intermediate Callback method for the user.
386 *
387 * This method is called once per iteration (during the convergence check),
388 * and can be used to obtain information about the optimization status while
389 * %Ipopt solves the problem, and also to request a premature termination.
390 *
391 * The information provided by the entities in the argument list correspond to
392 * what %Ipopt prints in the iteration summary (see also \ref OUTPUT). Further
393 * information can be obtained from the ip_data and ip_cq objects. The current
394 * iterate and violations of feasibility and optimality can be accessed via
395 * the methods IpoptInterface::get_curr_iterate() and
396 * IpoptInterface::get_curr_violations(). These methods translate values for
397 * the *internal representation* of the problem from `ip_data` and `ip_cq`
398 * objects into the TNLP representation.
399 *
400 * @return If this method returns false, %Ipopt will terminate with the
401 * User_Requested_Stop status.
402 *
403 * It is not required to implement (overload) this method. The default
404 * implementation always returns true.
405 */
406 bool intermediate_callback(
407 Ipopt::AlgorithmMode mode, Ipopt::Index iter, Ipopt::Number obj_value,
408 Ipopt::Number inf_pr, Ipopt::Number inf_du, Ipopt::Number mu,
409 Ipopt::Number d_norm, Ipopt::Number regularization_size,
410 Ipopt::Number alpha_du, Ipopt::Number alpha_pr, Ipopt::Index ls_trials,
411 const Ipopt::IpoptData* ip_data, Ipopt::IpoptCalculatedQuantities* ip_cq);
412
413 /**
414 * @brief Create the data structure to store temporary computations
415 *
416 * @return the IpoptInterface Data
417 */
418 boost::shared_ptr<IpoptInterfaceData> createData(const std::size_t nx,
419 const std::size_t ndx,
420 const std::size_t nu);
421
422 void resizeData();
423
424 /**
425 * @brief Return the total number of optimization variables (states and
426 * controls)
427 */
428 std::size_t get_nvar() const;
429
430 /**
431 * @brief Return the total number of constraints in the NLP
432 */
433 std::size_t get_nconst() const;
434
435 /**
436 * @brief Return the state vector
437 */
438 const std::vector<Eigen::VectorXd>& get_xs() const;
439
440 /**
441 * @brief Return the control vector
442 */
443 const std::vector<Eigen::VectorXd>& get_us() const;
444
445 /**
446 * @brief Return the crocoddyl::ShootingProblem to be solved
447 */
448 const boost::shared_ptr<crocoddyl::ShootingProblem>& get_problem() const;
449
450 double get_cost() const;
451
452 /**
453 * @brief Modify the state vector
454 */
455 void set_xs(const std::vector<Eigen::VectorXd>& xs);
456
457 /**
458 * @brief Modify the control vector
459 */
460 void set_us(const std::vector<Eigen::VectorXd>& us);
461
462 private:
463 boost::shared_ptr<crocoddyl::ShootingProblem>
464 problem_; //!< Optimal control problem
465 std::vector<Eigen::VectorXd> xs_; //!< Vector of states
466 std::vector<Eigen::VectorXd> us_; //!< Vector of controls
 467   std::vector<std::size_t> ixu_;     //!< Index of the (x, u) variables at node i
468 std::size_t nvar_; //!< Number of NLP variables
469 std::size_t nconst_; //!< Number of the NLP constraints
470 std::vector<boost::shared_ptr<IpoptInterfaceData>>
 471       datas_;       //!< Vector of data structures
472 double cost_; //!< Total cost
473
474 IpoptInterface(const IpoptInterface&);
475
476 IpoptInterface& operator=(const IpoptInterface&);
477 };
478
479 struct IpoptInterfaceData {
480 EIGEN_MAKE_ALIGNED_OPERATOR_NEW
481
482 66 IpoptInterfaceData(const std::size_t nx, const std::size_t ndx,
483 const std::size_t nu)
484 66 : x(nx),
485
1/2
✓ Branch 1 taken 66 times.
✗ Branch 2 not taken.
66 xnext(nx),
486
1/2
✓ Branch 1 taken 66 times.
✗ Branch 2 not taken.
66 dx(ndx),
487
1/2
✓ Branch 1 taken 66 times.
✗ Branch 2 not taken.
66 dxnext(ndx),
488
1/2
✓ Branch 1 taken 66 times.
✗ Branch 2 not taken.
66 x_diff(ndx),
489
1/2
✓ Branch 1 taken 66 times.
✗ Branch 2 not taken.
66 u(nu),
490
1/2
✓ Branch 1 taken 66 times.
✗ Branch 2 not taken.
66 Jint_dx(ndx, ndx),
491
1/2
✓ Branch 1 taken 66 times.
✗ Branch 2 not taken.
66 Jint_dxnext(ndx, ndx),
492
1/2
✓ Branch 1 taken 66 times.
✗ Branch 2 not taken.
66 Jdiff_x(ndx, ndx),
493
1/2
✓ Branch 1 taken 66 times.
✗ Branch 2 not taken.
66 Jdiff_xnext(ndx, ndx),
494
1/2
✓ Branch 1 taken 66 times.
✗ Branch 2 not taken.
66 Jg_dx(ndx, ndx),
495
1/2
✓ Branch 1 taken 66 times.
✗ Branch 2 not taken.
66 Jg_dxnext(ndx, ndx),
496
1/2
✓ Branch 1 taken 66 times.
✗ Branch 2 not taken.
66 Jg_u(ndx, ndx),
497
1/2
✓ Branch 1 taken 66 times.
✗ Branch 2 not taken.
66 Jg_ic(ndx, ndx),
498
1/2
✓ Branch 1 taken 66 times.
✗ Branch 2 not taken.
66 FxJint_dx(ndx, ndx),
499
1/2
✓ Branch 1 taken 66 times.
✗ Branch 2 not taken.
66 Ldx(ndx),
500
1/2
✓ Branch 1 taken 66 times.
✗ Branch 2 not taken.
66 Ldxdx(ndx, ndx),
501
1/2
✓ Branch 1 taken 66 times.
✗ Branch 2 not taken.
66 Ldxu(ndx, nu) {
502
1/2
✓ Branch 1 taken 66 times.
✗ Branch 2 not taken.
66 x.setZero();
503
1/2
✓ Branch 1 taken 66 times.
✗ Branch 2 not taken.
66 xnext.setZero();
504
1/2
✓ Branch 1 taken 66 times.
✗ Branch 2 not taken.
66 dx.setZero();
505
1/2
✓ Branch 1 taken 66 times.
✗ Branch 2 not taken.
66 dxnext.setZero();
506
1/2
✓ Branch 1 taken 66 times.
✗ Branch 2 not taken.
66 x_diff.setZero();
507
1/2
✓ Branch 1 taken 66 times.
✗ Branch 2 not taken.
66 u.setZero();
508
1/2
✓ Branch 1 taken 66 times.
✗ Branch 2 not taken.
66 Jint_dx.setZero();
509
1/2
✓ Branch 1 taken 66 times.
✗ Branch 2 not taken.
66 Jint_dxnext.setZero();
510
1/2
✓ Branch 1 taken 66 times.
✗ Branch 2 not taken.
66 Jdiff_x.setZero();
511
1/2
✓ Branch 1 taken 66 times.
✗ Branch 2 not taken.
66 Jdiff_xnext.setZero();
512
1/2
✓ Branch 1 taken 66 times.
✗ Branch 2 not taken.
66 Jg_dx.setZero();
513
1/2
✓ Branch 1 taken 66 times.
✗ Branch 2 not taken.
66 Jg_dxnext.setZero();
514
1/2
✓ Branch 1 taken 66 times.
✗ Branch 2 not taken.
66 Jg_u.setZero();
515
1/2
✓ Branch 1 taken 66 times.
✗ Branch 2 not taken.
66 Jg_ic.setZero();
516
1/2
✓ Branch 1 taken 66 times.
✗ Branch 2 not taken.
66 FxJint_dx.setZero();
517
1/2
✓ Branch 1 taken 66 times.
✗ Branch 2 not taken.
66 Ldx.setZero();
518
1/2
✓ Branch 1 taken 66 times.
✗ Branch 2 not taken.
66 Ldxdx.setZero();
519
1/2
✓ Branch 1 taken 66 times.
✗ Branch 2 not taken.
66 Ldxu.setZero();
520 66 }
521
522 void resize(const std::size_t nx, const std::size_t ndx,
523 const std::size_t nu) {
524 x.conservativeResize(nx);
525 xnext.conservativeResize(nx);
526 dx.conservativeResize(ndx);
527 dxnext.conservativeResize(ndx);
528 x_diff.conservativeResize(ndx);
529 u.conservativeResize(nu);
530 Jint_dx.conservativeResize(ndx, ndx);
531 Jint_dxnext.conservativeResize(ndx, ndx);
532 Jdiff_x.conservativeResize(ndx, ndx);
533 Jdiff_xnext.conservativeResize(ndx, ndx);
534 Jg_dx.conservativeResize(ndx, ndx);
535 Jg_dxnext.conservativeResize(ndx, ndx);
536 Jg_u.conservativeResize(ndx, ndx);
537 Jg_ic.conservativeResize(ndx, ndx);
538 FxJint_dx.conservativeResize(ndx, ndx);
539 Ldx.conservativeResize(ndx);
540 Ldxdx.conservativeResize(ndx, ndx);
541 Ldxu.conservativeResize(ndx, nu);
542 }
543
544 Eigen::VectorXd x; //!< Integrated state
545 Eigen::VectorXd xnext; //!< Integrated state at next node
546 Eigen::VectorXd dx; //!< Increment in the tangent space
547 Eigen::VectorXd dxnext; //!< Increment in the tangent space at next node
548 Eigen::VectorXd x_diff; //!< State difference
549 Eigen::VectorXd u; //!< Control
550 Eigen::MatrixXd Jint_dx; //!< Jacobian of the sum operation w.r.t dx
551 Eigen::MatrixXd
552 Jint_dxnext; //!< Jacobian of the sum operation w.r.t dx at next node
553 Eigen::MatrixXd
554 Jdiff_x; //!< Jacobian of the diff operation w.r.t the first element
555 Eigen::MatrixXd Jdiff_xnext; //!< Jacobian of the diff operation w.r.t the
556 //!< first element at the next node
557 Eigen::MatrixXd Jg_dx; //!< Jacobian of the dynamic constraint w.r.t dx
558 Eigen::MatrixXd
559 Jg_dxnext; //!< Jacobian of the dynamic constraint w.r.t dxnext
560 Eigen::MatrixXd Jg_u; //!< Jacobian of the dynamic constraint w.r.t u
561 Eigen::MatrixXd
562 Jg_ic; //!< Jacobian of the initial condition constraint w.r.t dx
563 Eigen::MatrixXd FxJint_dx; //!< Intermediate computation needed for Jg_ic
564 Eigen::VectorXd Ldx; //!< Jacobian of the cost w.r.t dx
565 Eigen::MatrixXd Ldxdx; //!< Hessian of the cost w.r.t dxdx
566 Eigen::MatrixXd Ldxu; //!< Hessian of the cost w.r.t dxu
567 };
568
569 } // namespace crocoddyl
570
571 #endif
572