| Line | Branch | Exec | Source |
|---|---|---|---|
| 1 | /////////////////////////////////////////////////////////////////////////////// | ||
| 2 | // BSD 3-Clause License | ||
| 3 | // | ||
| 4 | // Copyright (C) 2022-2023, IRI: CSIC-UPC, Heriot-Watt University | ||
| 5 | // Copyright note valid unless otherwise stated in individual files. | ||
| 6 | // All rights reserved. | ||
| 7 | /////////////////////////////////////////////////////////////////////////////// | ||
| 8 | |||
| 9 | #ifndef __CROCODDYL_CORE_SOLVERS_IPOPT_IPOPT_IFACE_HPP__ | ||
| 10 | #define __CROCODDYL_CORE_SOLVERS_IPOPT_IPOPT_IFACE_HPP__ | ||
| 11 | |||
| 12 | #define HAVE_CSTDDEF | ||
| 13 | #include <IpTNLP.hpp> | ||
| 14 | #undef HAVE_CSTDDEF | ||
| 15 | |||
| 16 | #include "crocoddyl/core/optctrl/shooting.hpp" | ||
| 17 | |||
| 18 | namespace crocoddyl { | ||
| 19 | |||
| 20 | struct IpoptInterfaceData; | ||
| 21 | |||
| 22 | /** | ||
| 23 | * @brief Class for interfacing a crocoddyl::ShootingProblem with IPOPT | ||
| 24 | * | ||
| 25 | * This class implements the pure virtual functions from Ipopt::TNLP to solve | ||
| 26 | * the optimal control problem in `problem_` using a multiple shooting approach. | ||
| 27 | * | ||
| 28 | * Ipopt considers its decision variables `x` to belong to a Euclidean space. | ||
| 29 | * However, Crocoddyl states may lie on a manifold. To ensure that the | ||
| 30 | * Ipopt solution lies on the state manifold, we perform the | ||
| 31 | * optimization in the tangent space of a given initial state. Finally, we | ||
| 32 | * retract the Ipopt solution back to the manifold. That is: | ||
| 33 | * \f[ | ||
| 34 | * \begin{aligned} | ||
| 35 | * \mathbf{x}^* = \mathbf{x}^0 \oplus \mathbf{\Delta x}^* | ||
| 36 | * \end{aligned} | ||
| 37 | * \f] | ||
| 38 | * | ||
| 39 | * where \f$\mathbf{x}^*\f$ is the final solution, \f$\mathbf{x}^0\f$ is the | ||
| 40 | * initial guess, and \f$\mathbf{\Delta x}^*\f$ is the Ipopt solution in the | ||
| 41 | * tangent space of \f$\mathbf{x}^0\f$. Due to this procedure, the computation | ||
| 42 | * of the cost function, the dynamic constraints, and their corresponding | ||
| 43 | * derivatives must be modified accordingly. | ||
| 44 | * | ||
| 45 | * The Ipopt decision vector is built as follows: \f$x = [ \mathbf{\Delta | ||
| 46 | * x}_0^\top, \mathbf{u}_0^\top, \mathbf{\Delta x}_1^\top, \mathbf{u}_1^\top, | ||
| 47 | * \dots, \mathbf{\Delta x}_N^\top ]\f$ | ||
| 48 | * | ||
| 49 | * Dynamic constraints are posed as: \f$(\mathbf{x}^0_{k+1} \oplus | ||
| 50 | * \mathbf{\Delta x}_{k+1}) \ominus \mathbf{f}(\mathbf{x}_{k}^0 \oplus | ||
| 51 | * \mathbf{\Delta x}_{k}, \mathbf{u}_k) = \mathbf{0}\f$ | ||
| 52 | * | ||
| 53 | * Initial condition: \f$ \mathbf{x}(0) \ominus (\mathbf{x}_{0}^0 \oplus | ||
| 54 | * \mathbf{\Delta x}_{0}) = \mathbf{0}\f$ | ||
| 55 | * | ||
| 56 | * The documentation of the methods below has been extracted from Ipopt's IpTNLP.hpp file. | ||
| 57 | * | ||
| 58 | * \sa `get_nlp_info()`, `get_bounds_info()`, `eval_f()`, `eval_g()`, | ||
| 59 | * `eval_grad_f()`, `eval_jac_g()`, `eval_h()` | ||
| 60 | */ | ||
| 61 | |||
| 62 | class IpoptInterface : public Ipopt::TNLP { | ||
| 63 | public: | ||
| 64 | EIGEN_MAKE_ALIGNED_OPERATOR_NEW | ||
| 65 | |||
| 66 | /** | ||
| 67 | * @brief Initialize the Ipopt interface | ||
| 68 | * | ||
| 69 | * @param[in] problem Crocoddyl shooting problem | ||
| 70 | */ | ||
| 71 | IpoptInterface(const std::shared_ptr<crocoddyl::ShootingProblem>& problem); | ||
| 72 | |||
| 73 | virtual ~IpoptInterface(); | ||
| 74 | |||
| 75 | /** | ||
| 76 | * @brief Methods to gather information about the NLP | ||
| 77 | * | ||
| 78 | * %Ipopt uses this information when allocating the arrays that it will later | ||
| 79 | * ask you to fill with values. Be careful in this method since incorrect | ||
| 80 | * values will cause memory bugs which may be very difficult to find. | ||
| 81 | * @param[out] n Storage for the number of variables \f$x\f$ | ||
| 82 | * @param[out] m Storage for the number of constraints \f$g(x)\f$ | ||
| 83 | * @param[out] nnz_jac_g Storage for the number of nonzero entries in the | ||
| 84 | * Jacobian | ||
| 85 | * @param[out] nnz_h_lag Storage for the number of nonzero entries in the | ||
| 86 | * Hessian | ||
| 87 | * @param[out] index_style Storage for the index style, i.e., the numbering style | ||
| 88 | * used for row/col entries in the sparse matrix format | ||
| 89 | */ | ||
| 90 | virtual bool get_nlp_info(Ipopt::Index& n, Ipopt::Index& m, | ||
| 91 | Ipopt::Index& nnz_jac_g, Ipopt::Index& nnz_h_lag, | ||
| 92 | IndexStyleEnum& index_style); | ||
| 93 | |||
| 94 | /** | ||
| 95 | * @brief Method to request bounds on the variables and constraints. | ||
| 96 | * | ||
| 97 | * @param[in] n Number of variables \f$x\f$ in the problem | ||
| 98 | * @param[out] x_l Lower bounds \f$x^L\f$ for the variables \f$x\f$ | ||
| 99 | * @param[out] x_u Upper bounds \f$x^U\f$ for the variables \f$x\f$ | ||
| 100 | * @param[in] m Number of constraints \f$g(x)\f$ in the problem | ||
| 101 | * @param[out] g_l Lower bounds \f$g^L\f$ for the constraints \f$g(x)\f$ | ||
| 102 | * @param[out] g_u Upper bounds \f$g^U\f$ for the constraints \f$g(x)\f$ | ||
| 103 | * | ||
| 104 | * @return true if success, false otherwise. | ||
| 105 | * | ||
| 106 | * The values of `n` and `m` that were specified in | ||
| 107 | * IpoptInterface::get_nlp_info are passed | ||
| 108 | * here for debug checking. Setting a lower bound to a value less than or | ||
| 109 | * equal to the value of the option \ref OPT_nlp_lower_bound_inf | ||
| 110 | * "nlp_lower_bound_inf" | ||
| 111 | * will cause %Ipopt to assume no lower bound. Likewise, specifying an upper | ||
| 112 | * bound greater than or | ||
| 113 | * equal to the value of the option \ref OPT_nlp_upper_bound_inf | ||
| 114 | * "nlp_upper_bound_inf" | ||
| 115 | * will cause %Ipopt to assume no upper bound. These options are set to | ||
| 116 | * -10<sup>19</sup> and | ||
| 117 | * 10<sup>19</sup>, respectively, by default, but may be modified by changing | ||
| 118 | * these | ||
| 119 | * options. | ||
| 120 | */ | ||
| 121 | virtual bool get_bounds_info(Ipopt::Index n, Ipopt::Number* x_l, | ||
| 122 | Ipopt::Number* x_u, Ipopt::Index m, | ||
| 123 | Ipopt::Number* g_l, Ipopt::Number* g_u); | ||
| 124 | |||
| 125 | /** | ||
| 126 | * @brief Method to request the starting point before iterating. | ||
| 127 | * | ||
| 128 | * @param[in] n Number of variables \f$x\f$ in the problem; it | ||
| 129 | * will have the same value that was specified in | ||
| 130 | * `IpoptInterface::get_nlp_info` | ||
| 131 | * @param[in] init_x If true, this method must provide an initial value | ||
| 132 | * for \f$x\f$ | ||
| 133 | * @param[out] x Initial values for the primal variables \f$x\f$ | ||
| 134 | * @param[in] init_z If true, this method must provide an initial value | ||
| 135 | * for the bound multipliers \f$z^L\f$ and \f$z^U\f$ | ||
| 136 | * @param[out] z_L Initial values for the bound multipliers \f$z^L\f$ | ||
| 137 | * @param[out] z_U Initial values for the bound multipliers \f$z^U\f$ | ||
| 138 | * @param[in] m Number of constraints \f$g(x)\f$ in the problem; | ||
| 139 | * it will have the same value that was specified in | ||
| 140 | * `IpoptInterface::get_nlp_info` | ||
| 141 | * @param[in] init_lambda If true, this method must provide an initial value | ||
| 142 | * for the constraint multipliers \f$\lambda\f$ | ||
| 143 | * @param[out] lambda Initial values for the constraint multipliers, | ||
| 144 | * \f$\lambda\f$ | ||
| 145 | * | ||
| 146 | * @return true if success, false otherwise. | ||
| 147 | * | ||
| 148 | * The boolean variables indicate whether the algorithm requires x, | ||
| 149 | * z_L/z_U, and lambda to be initialized, respectively. If, for some reason, the | ||
| 150 | * algorithm requires initializations that cannot be provided, false should be | ||
| 151 | * returned and %Ipopt will stop. The default options only require initial | ||
| 152 | * values for the primal variables \f$x\f$. | ||
| 153 | * | ||
| 154 | * Note that the initial values for bound multiplier components for absent | ||
| 155 | * bounds (\f$x^L_i=-\infty\f$ or \f$x^U_i=\infty\f$) are ignored. | ||
| 156 | */ | ||
| 157 | // [TNLP_get_starting_point] | ||
| 158 | virtual bool get_starting_point(Ipopt::Index n, bool init_x, Ipopt::Number* x, | ||
| 159 | bool init_z, Ipopt::Number* z_L, | ||
| 160 | Ipopt::Number* z_U, Ipopt::Index m, | ||
| 161 | bool init_lambda, Ipopt::Number* lambda); | ||
| 162 | |||
| 163 | /** | ||
| 164 | * @brief Method to request the value of the objective function. | ||
| 165 | * | ||
| 166 | * @param[in] n Number of variables \f$x\f$ in the problem; it will | ||
| 167 | * have the same value that was specified in `IpoptInterface::get_nlp_info` | ||
| 168 | * @param[in] x Values for the primal variables \f$x\f$ at which the | ||
| 169 | * objective function \f$f(x)\f$ is to be evaluated | ||
| 170 | * @param[in] new_x False if any evaluation method (`eval_*`) was | ||
| 171 | * previously called with the same values in x, true otherwise. This can be | ||
| 172 | * helpful when users have efficient implementations that calculate multiple | ||
| 173 | * outputs at once. %Ipopt internally caches results from the TNLP and | ||
| 174 | * generally, this flag can be ignored. | ||
| 175 | * @param[out] obj_value Storage for the value of the objective function | ||
| 176 | * \f$f(x)\f$ | ||
| 177 | * | ||
| 178 | * @return true if success, false otherwise. | ||
| 179 | */ | ||
| 180 | virtual bool eval_f(Ipopt::Index n, const Ipopt::Number* x, bool new_x, | ||
| 181 | Ipopt::Number& obj_value); | ||
| 182 | |||
| 183 | /** | ||
| 184 | * @brief Method to request the gradient of the objective function. | ||
| 185 | * | ||
| 186 | * @param[in] n Number of variables \f$x\f$ in the problem; it will | ||
| 187 | * have the same value that was specified in `IpoptInterface::get_nlp_info` | ||
| 188 | * @param[in] x Values for the primal variables \f$x\f$ at which the | ||
| 189 | * gradient \f$\nabla f(x)\f$ is to be evaluated | ||
| 190 | * @param[in] new_x False if any evaluation method (`eval_*`) was | ||
| 191 | * previously called with the same values in x, true otherwise; see also | ||
| 192 | * `IpoptInterface::eval_f` | ||
| 193 | * @param[out] grad_f Array to store values of the gradient of the objective | ||
| 194 | * function \f$\nabla f(x)\f$. The gradient array is in the same order as the | ||
| 195 | * \f$x\f$ variables (i.e., the gradient of the objective with respect to | ||
| 196 | * `x[2]` should be put in `grad_f[2]`). | ||
| 197 | * | ||
| 198 | * @return true if success, false otherwise. | ||
| 199 | */ | ||
| 200 | virtual bool eval_grad_f(Ipopt::Index n, const Ipopt::Number* x, bool new_x, | ||
| 201 | Ipopt::Number* grad_f); | ||
| 202 | |||
| 203 | /** | ||
| 204 | * @brief Method to request the constraint values. | ||
| 205 | * | ||
| 206 | * @param[in] n Number of variables \f$x\f$ in the problem; it will have | ||
| 207 | * the same value that was specified in `IpoptInterface::get_nlp_info` | ||
| 208 | * @param[in] x Values for the primal variables \f$x\f$ at which the | ||
| 209 | * constraint functions \f$g(x)\f$ are to be evaluated | ||
| 210 | * @param[in] new_x False if any evaluation method (`eval_*`) was previously | ||
| 211 | * called with the same values in x, true otherwise; see also | ||
| 212 | * `IpoptInterface::eval_f` | ||
| 213 | * @param[in] m Number of constraints \f$g(x)\f$ in the problem; it will | ||
| 214 | * have the same value that was specified in `IpoptInterface::get_nlp_info` | ||
| 215 | * @param[out] g Array to store constraint function values \f$g(x)\f$, do | ||
| 216 | * not add or subtract the bound values \f$g^L\f$ or \f$g^U\f$. | ||
| 217 | * | ||
| 218 | * @return true if success, false otherwise. | ||
| 219 | */ | ||
| 220 | virtual bool eval_g(Ipopt::Index n, const Ipopt::Number* x, bool new_x, | ||
| 221 | Ipopt::Index m, Ipopt::Number* g); | ||
| 222 | |||
| 223 | /** | ||
| 224 | * @brief Method to request either the sparsity structure or the values of the | ||
| 225 | * Jacobian of the constraints. | ||
| 226 | * | ||
| 227 | * The Jacobian is the matrix of derivatives where the derivative of | ||
| 228 | * constraint function \f$g_i\f$ with respect to variable \f$x_j\f$ is placed | ||
| 229 | * in row \f$i\f$ and column \f$j\f$. See \ref TRIPLET for a discussion of the | ||
| 230 | * sparse matrix format used in this method. | ||
| 231 | * | ||
| 232 | * @param[in] n Number of variables \f$x\f$ in the problem; it will | ||
| 233 | * have the same value that was specified in `IpoptInterface::get_nlp_info` | ||
| 234 | * @param[in] x First call: NULL; later calls: the values for the | ||
| 235 | * primal variables \f$x\f$ at which the constraint Jacobian \f$\nabla | ||
| 236 | * g(x)^T\f$ is to be evaluated | ||
| 237 | * @param[in] new_x False if any evaluation method (`eval_*`) was | ||
| 238 | * previously called with the same values in x, true otherwise; see also | ||
| 239 | * `IpoptInterface::eval_f` | ||
| 240 | * @param[in] m Number of constraints \f$g(x)\f$ in the problem; it | ||
| 241 | * will have the same value that was specified in | ||
| 242 | * `IpoptInterface::get_nlp_info` | ||
| 243 | * @param[in] nele_jac Number of nonzero elements in the Jacobian; it will | ||
| 244 | * have the same value that was specified in `IpoptInterface::get_nlp_info` | ||
| 245 | * @param[out] iRow First call: array of length `nele_jac` to store the | ||
| 246 | * row indices of entries in the Jacobian of the constraints; later calls: NULL | ||
| 247 | * @param[out] jCol First call: array of length `nele_jac` to store the | ||
| 248 | * column indices of entries in the Jacobian of the constraints; later calls: | ||
| 249 | * NULL | ||
| 250 | * @param[out] values First call: NULL; later calls: array of length | ||
| 251 | * nele_jac to store the values of the entries in the Jacobian of the | ||
| 252 | * constraints | ||
| 253 | * | ||
| 254 | * @return true if success, false otherwise. | ||
| 255 | * | ||
| 256 | * @note The arrays iRow and jCol only need to be filled once. If the iRow and | ||
| 257 | * jCol arguments are not NULL (first call to this function), then %Ipopt | ||
| 258 | * expects that the sparsity structure of the Jacobian (the row and column | ||
| 259 | * indices only) are written into iRow and jCol. At this call, the arguments | ||
| 260 | * `x` and `values` will be NULL. If the arguments `x` and `values` are not | ||
| 261 | * NULL, then %Ipopt expects that the value of the Jacobian as calculated from | ||
| 262 | * array `x` is stored in array `values` (using the same order as used when | ||
| 263 | * specifying the sparsity structure). At this call, the arguments `iRow` and | ||
| 264 | * `jCol` will be NULL. | ||
| 265 | */ | ||
| 266 | virtual bool eval_jac_g(Ipopt::Index n, const Ipopt::Number* x, bool new_x, | ||
| 267 | Ipopt::Index m, Ipopt::Index nele_jac, | ||
| 268 | Ipopt::Index* iRow, Ipopt::Index* jCol, | ||
| 269 | Ipopt::Number* values); | ||
| 270 | |||
| 271 | /** | ||
| 272 | * @brief Method to request either the sparsity structure or the values of the | ||
| 273 | * Hessian of the Lagrangian. | ||
| 274 | * | ||
| 275 | * The Hessian matrix that %Ipopt uses is | ||
| 276 | * \f[ \sigma_f \nabla^2 f(x_k) + \sum_{i=1}^m\lambda_i\nabla^2 g_i(x_k) \f] | ||
| 277 | * for the given values for \f$x\f$, \f$\sigma_f\f$, and \f$\lambda\f$. | ||
| 278 | * See \ref TRIPLET for a discussion of the sparse matrix format used in this | ||
| 279 | * method. | ||
| 280 | * | ||
| 281 | * @param[in] n Number of variables \f$x\f$ in the problem; it will | ||
| 282 | * have the same value that was specified in `IpoptInterface::get_nlp_info` | ||
| 283 | * @param[in] x First call: NULL; later calls: the values for the | ||
| 284 | * primal variables \f$x\f$ at which the Hessian is to be evaluated | ||
| 285 | * @param[in] new_x False if any evaluation method (`eval_*`) was | ||
| 286 | * previously called with the same values in x, true otherwise; see also | ||
| 287 | * IpoptInterface::eval_f | ||
| 288 | * @param[in] obj_factor Factor \f$\sigma_f\f$ in front of the objective term | ||
| 289 | * in the Hessian | ||
| 290 | * @param[in] m Number of constraints \f$g(x)\f$ in the problem; it | ||
| 291 | * will have the same value that was specified in | ||
| 292 | * `IpoptInterface::get_nlp_info` | ||
| 293 | * @param[in] lambda Values for the constraint multipliers \f$\lambda\f$ | ||
| 294 | * at which the Hessian is to be evaluated | ||
| 295 | * @param[in] new_lambda False if any evaluation method was previously called | ||
| 296 | * with the same values in lambda, true otherwise | ||
| 297 | * @param[in] nele_hess Number of nonzero elements in the Hessian; it will | ||
| 298 | * have the same value that was specified in `IpoptInterface::get_nlp_info` | ||
| 299 | * @param[out] iRow First call: array of length nele_hess to store the | ||
| 300 | * row indices of entries in the Hessian; later calls: NULL | ||
| 301 | * @param[out] jCol First call: array of length nele_hess to store the | ||
| 302 | * column indices of entries in the Hessian; later calls: NULL | ||
| 303 | * @param[out] values First call: NULL; later calls: array of length | ||
| 304 | * nele_hess to store the values of the entries in the Hessian | ||
| 305 | * | ||
| 306 | * @return true if success, false otherwise. | ||
| 307 | * | ||
| 308 | * @note The arrays iRow and jCol only need to be filled once. If the iRow and | ||
| 309 | * jCol arguments are not NULL (first call to this function), then %Ipopt | ||
| 310 | * expects that the sparsity structure of the Hessian (the row and column | ||
| 311 | * indices only) are written into iRow and jCol. At this call, the arguments | ||
| 312 | * `x`, `lambda`, and `values` will be NULL. If the arguments `x`, `lambda`, | ||
| 313 | * and `values` are not NULL, then %Ipopt expects that the value of the | ||
| 314 | * Hessian as calculated from arrays `x` and `lambda` are stored in array | ||
| 315 | * `values` (using the same order as used when specifying the sparsity | ||
| 316 | * structure). At this call, the arguments `iRow` and `jCol` will be NULL. | ||
| 317 | * | ||
| 318 | * @attention As this matrix is symmetric, %Ipopt expects that only the lower | ||
| 319 | * diagonal entries are specified. | ||
| 320 | * | ||
| 321 | * A default implementation is provided, in case the user wants to use | ||
| 322 | * quasi-Newton approximations to estimate the second derivatives and does | ||
| 323 | * not need to implement this method. | ||
| 324 | */ | ||
| 325 | virtual bool eval_h(Ipopt::Index n, const Ipopt::Number* x, bool new_x, | ||
| 326 | Ipopt::Number obj_factor, Ipopt::Index m, | ||
| 327 | const Ipopt::Number* lambda, bool new_lambda, | ||
| 328 | Ipopt::Index nele_hess, Ipopt::Index* iRow, | ||
| 329 | Ipopt::Index* jCol, Ipopt::Number* values); | ||
| 330 | |||
| 331 | /** | ||
| 332 | * @brief This method is called when the algorithm has finished (successfully | ||
| 333 | * or not) so the TNLP can digest the outcome, e.g., store/write the solution, | ||
| 334 | * if any. | ||
| 335 | * | ||
| 336 | * @param[in] status @parblock gives the status of the algorithm | ||
| 337 | * - SUCCESS: Algorithm terminated successfully at a locally optimal | ||
| 338 | * point, satisfying the convergence tolerances (can be specified | ||
| 339 | * by options). | ||
| 340 | * - MAXITER_EXCEEDED: Maximum number of iterations exceeded (can be | ||
| 341 | * specified by an option). | ||
| 342 | * - CPUTIME_EXCEEDED: Maximum number of CPU seconds exceeded (can be | ||
| 343 | * specified by an option). | ||
| 344 | * - STOP_AT_TINY_STEP: Algorithm proceeds with very little progress. | ||
| 345 | * - STOP_AT_ACCEPTABLE_POINT: Algorithm stopped at a point that was | ||
| 346 | * converged, not to "desired" tolerances, but to "acceptable" tolerances (see | ||
| 347 | * the acceptable-... options). | ||
| 348 | * - LOCAL_INFEASIBILITY: Algorithm converged to a point of local | ||
| 349 | * infeasibility. Problem may be infeasible. | ||
| 350 | * - USER_REQUESTED_STOP: The user call-back function | ||
| 351 | * IpoptInterface::intermediate_callback returned false, i.e., the user code | ||
| 352 | * requested a premature termination of the optimization. | ||
| 353 | * - DIVERGING_ITERATES: It seems that the iterates diverge. | ||
| 354 | * - RESTORATION_FAILURE: Restoration phase failed, algorithm doesn't know | ||
| 355 | * how to proceed. | ||
| 356 | * - ERROR_IN_STEP_COMPUTATION: An unrecoverable error occurred while %Ipopt | ||
| 357 | * tried to compute the search direction. | ||
| 358 | * - INVALID_NUMBER_DETECTED: Algorithm received an invalid number (such as | ||
| 359 | * NaN or Inf) from the NLP; see also option check_derivatives_for_naninf. | ||
| 360 | * - INTERNAL_ERROR: An unknown internal error occurred. | ||
| 361 | * @endparblock | ||
| 362 | * @param[in] n Number of variables \f$x\f$ in the problem; it will | ||
| 363 | * have the same value that was specified in `IpoptInterface::get_nlp_info` | ||
| 364 | * @param[in] x Final values for the primal variables | ||
| 365 | * @param[in] z_L Final values for the lower bound multipliers | ||
| 366 | * @param[in] z_U Final values for the upper bound multipliers | ||
| 367 | * @param[in] m Number of constraints \f$g(x)\f$ in the problem; it | ||
| 368 | * will have the same value that was specified in | ||
| 369 | * `IpoptInterface::get_nlp_info` | ||
| 370 | * @param[in] g Final values of the constraint functions | ||
| 371 | * @param[in] lambda Final values of the constraint multipliers | ||
| 372 | * @param[in] obj_value Final value of the objective function | ||
| 373 | * @param[in] ip_data Provided for expert users | ||
| 374 | * @param[in] ip_cq Provided for expert users | ||
| 375 | */ | ||
| 376 | virtual void finalize_solution( | ||
| 377 | Ipopt::SolverReturn status, Ipopt::Index n, const Ipopt::Number* x, | ||
| 378 | const Ipopt::Number* z_L, const Ipopt::Number* z_U, Ipopt::Index m, | ||
| 379 | const Ipopt::Number* g, const Ipopt::Number* lambda, | ||
| 380 | Ipopt::Number obj_value, const Ipopt::IpoptData* ip_data, | ||
| 381 | Ipopt::IpoptCalculatedQuantities* ip_cq); | ||
| 382 | |||
| 383 | /** | ||
| 384 | * @brief Intermediate Callback method for the user. | ||
| 385 | * | ||
| 386 | * This method is called once per iteration (during the convergence check), | ||
| 387 | * and can be used to obtain information about the optimization status while | ||
| 388 | * %Ipopt solves the problem, and also to request a premature termination. | ||
| 389 | * | ||
| 390 | * The information provided by the entities in the argument list corresponds to | ||
| 391 | * what %Ipopt prints in the iteration summary (see also \ref OUTPUT). Further | ||
| 392 | * information can be obtained from the ip_data and ip_cq objects. The current | ||
| 393 | * iterate and violations of feasibility and optimality can be accessed via | ||
| 394 | * the methods IpoptInterface::get_curr_iterate() and | ||
| 395 | * IpoptInterface::get_curr_violations(). These methods translate values for | ||
| 396 | * the *internal representation* of the problem from `ip_data` and `ip_cq` | ||
| 397 | * objects into the TNLP representation. | ||
| 398 | * | ||
| 399 | * @return If this method returns false, %Ipopt will terminate with the | ||
| 400 | * User_Requested_Stop status. | ||
| 401 | * | ||
| 402 | * It is not required to implement (overload) this method. The default | ||
| 403 | * implementation always returns true. | ||
| 404 | */ | ||
| 405 | bool intermediate_callback( | ||
| 406 | Ipopt::AlgorithmMode mode, Ipopt::Index iter, Ipopt::Number obj_value, | ||
| 407 | Ipopt::Number inf_pr, Ipopt::Number inf_du, Ipopt::Number mu, | ||
| 408 | Ipopt::Number d_norm, Ipopt::Number regularization_size, | ||
| 409 | Ipopt::Number alpha_du, Ipopt::Number alpha_pr, Ipopt::Index ls_trials, | ||
| 410 | const Ipopt::IpoptData* ip_data, Ipopt::IpoptCalculatedQuantities* ip_cq); | ||
| 411 | |||
| 412 | /** | ||
| 413 | * @brief Create the data structure to store temporary computations | ||
| 414 | * | ||
| 415 | * @return the IpoptInterface Data | ||
| 416 | */ | ||
| 417 | std::shared_ptr<IpoptInterfaceData> createData(const std::size_t nx, | ||
| 418 | const std::size_t ndx, | ||
| 419 | const std::size_t nu); | ||
| 420 | |||
| 421 | void resizeData();  //!< Resize the stored data to the current problem dimensions | ||
| 422 | |||
| 423 | /** | ||
| 424 | * @brief Return the total number of optimization variables (states and | ||
| 425 | * controls) | ||
| 426 | */ | ||
| 427 | std::size_t get_nvar() const; | ||
| 428 | |||
| 429 | /** | ||
| 430 | * @brief Return the total number of constraints in the NLP | ||
| 431 | */ | ||
| 432 | std::size_t get_nconst() const; | ||
| 433 | |||
| 434 | /** | ||
| 435 | * @brief Return the vector of states | ||
| 436 | */ | ||
| 437 | const std::vector<Eigen::VectorXd>& get_xs() const; | ||
| 438 | |||
| 439 | /** | ||
| 440 | * @brief Return the vector of controls | ||
| 441 | */ | ||
| 442 | const std::vector<Eigen::VectorXd>& get_us() const; | ||
| 443 | |||
| 444 | /** | ||
| 445 | * @brief Return the crocoddyl::ShootingProblem to be solved | ||
| 446 | */ | ||
| 447 | const std::shared_ptr<crocoddyl::ShootingProblem>& get_problem() const; | ||
| 448 | |||
| 449 | double get_cost() const;  //!< Return the total cost | ||
| 450 | |||
| 451 | /** | ||
| 452 | * @brief Modify the vector of states | ||
| 453 | */ | ||
| 454 | void set_xs(const std::vector<Eigen::VectorXd>& xs); | ||
| 455 | |||
| 456 | /** | ||
| 457 | * @brief Modify the vector of controls | ||
| 458 | */ | ||
| 459 | void set_us(const std::vector<Eigen::VectorXd>& us); | ||
| 460 | |||
| 461 | private: | ||
| 462 | std::shared_ptr<crocoddyl::ShootingProblem> | ||
| 463 | problem_; //!< Optimal control problem | ||
| 464 | std::vector<Eigen::VectorXd> xs_; //!< Vector of states | ||
| 465 | std::vector<Eigen::VectorXd> us_; //!< Vector of controls | ||
| 466 | std::vector<std::size_t> ixu_; //!< Index of the (dx, u) block of node i | ||
| 467 | std::size_t nvar_; //!< Number of NLP variables | ||
| 468 | std::size_t nconst_; //!< Number of the NLP constraints | ||
| 469 | std::vector<std::shared_ptr<IpoptInterfaceData>> datas_; //!< Vector of per-node data | ||
| 470 | double cost_; //!< Total cost | ||
| 471 | |||
| 472 | IpoptInterface(const IpoptInterface&); | ||
| 473 | |||
| 474 | IpoptInterface& operator=(const IpoptInterface&); | ||
| 475 | }; | ||
| 476 | |||
| 477 | struct IpoptInterfaceData { | ||
| 478 | EIGEN_MAKE_ALIGNED_OPERATOR_NEW | ||
| 479 | |||
| 480 | ✗ | IpoptInterfaceData(const std::size_t nx, const std::size_t ndx, | |
| 481 | const std::size_t nu) | ||
| 482 | ✗ | : x(nx), | |
| 483 | ✗ | xnext(nx), | |
| 484 | ✗ | dx(ndx), | |
| 485 | ✗ | dxnext(ndx), | |
| 486 | ✗ | x_diff(ndx), | |
| 487 | ✗ | u(nu), | |
| 488 | ✗ | Jint_dx(ndx, ndx), | |
| 489 | ✗ | Jint_dxnext(ndx, ndx), | |
| 490 | ✗ | Jdiff_x(ndx, ndx), | |
| 491 | ✗ | Jdiff_xnext(ndx, ndx), | |
| 492 | ✗ | Jg_dx(ndx, ndx), | |
| 493 | ✗ | Jg_dxnext(ndx, ndx), | |
| 494 | ✗ | Jg_u(ndx, ndx), | |
| 495 | ✗ | Jg_ic(ndx, ndx), | |
| 496 | ✗ | FxJint_dx(ndx, ndx), | |
| 497 | ✗ | Ldx(ndx), | |
| 498 | ✗ | Ldxdx(ndx, ndx), | |
| 499 | ✗ | Ldxu(ndx, nu) { | |
| 500 | ✗ | x.setZero(); | |
| 501 | ✗ | xnext.setZero(); | |
| 502 | ✗ | dx.setZero(); | |
| 503 | ✗ | dxnext.setZero(); | |
| 504 | ✗ | x_diff.setZero(); | |
| 505 | ✗ | u.setZero(); | |
| 506 | ✗ | Jint_dx.setZero(); | |
| 507 | ✗ | Jint_dxnext.setZero(); | |
| 508 | ✗ | Jdiff_x.setZero(); | |
| 509 | ✗ | Jdiff_xnext.setZero(); | |
| 510 | ✗ | Jg_dx.setZero(); | |
| 511 | ✗ | Jg_dxnext.setZero(); | |
| 512 | ✗ | Jg_u.setZero(); | |
| 513 | ✗ | Jg_ic.setZero(); | |
| 514 | ✗ | FxJint_dx.setZero(); | |
| 515 | ✗ | Ldx.setZero(); | |
| 516 | ✗ | Ldxdx.setZero(); | |
| 517 | ✗ | Ldxu.setZero(); | |
| 518 | ✗ | } | |
| 519 | |||
| 520 | ✗ | void resize(const std::size_t nx, const std::size_t ndx, | |
| 521 | const std::size_t nu) { | ||
| 522 | ✗ | x.conservativeResize(nx); | |
| 523 | ✗ | xnext.conservativeResize(nx); | |
| 524 | ✗ | dx.conservativeResize(ndx); | |
| 525 | ✗ | dxnext.conservativeResize(ndx); | |
| 526 | ✗ | x_diff.conservativeResize(ndx); | |
| 527 | ✗ | u.conservativeResize(nu); | |
| 528 | ✗ | Jint_dx.conservativeResize(ndx, ndx); | |
| 529 | ✗ | Jint_dxnext.conservativeResize(ndx, ndx); | |
| 530 | ✗ | Jdiff_x.conservativeResize(ndx, ndx); | |
| 531 | ✗ | Jdiff_xnext.conservativeResize(ndx, ndx); | |
| 532 | ✗ | Jg_dx.conservativeResize(ndx, ndx); | |
| 533 | ✗ | Jg_dxnext.conservativeResize(ndx, ndx); | |
| 534 | ✗ | Jg_u.conservativeResize(ndx, ndx); | |
| 535 | ✗ | Jg_ic.conservativeResize(ndx, ndx); | |
| 536 | ✗ | FxJint_dx.conservativeResize(ndx, ndx); | |
| 537 | ✗ | Ldx.conservativeResize(ndx); | |
| 538 | ✗ | Ldxdx.conservativeResize(ndx, ndx); | |
| 539 | ✗ | Ldxu.conservativeResize(ndx, nu); | |
| 540 | ✗ | } | |
| 541 | |||
| 542 | Eigen::VectorXd x; //!< Integrated state | ||
| 543 | Eigen::VectorXd xnext; //!< Integrated state at next node | ||
| 544 | Eigen::VectorXd dx; //!< Increment in the tangent space | ||
| 545 | Eigen::VectorXd dxnext; //!< Increment in the tangent space at next node | ||
| 546 | Eigen::VectorXd x_diff; //!< State difference | ||
| 547 | Eigen::VectorXd u; //!< Control | ||
| 548 | Eigen::MatrixXd Jint_dx; //!< Jacobian of the sum operation w.r.t dx | ||
| 549 | Eigen::MatrixXd | ||
| 550 | Jint_dxnext; //!< Jacobian of the sum operation w.r.t dx at next node | ||
| 551 | Eigen::MatrixXd | ||
| 552 | Jdiff_x; //!< Jacobian of the diff operation w.r.t the first element | ||
| 553 | Eigen::MatrixXd Jdiff_xnext; //!< Jacobian of the diff operation w.r.t the | ||
| 554 | //!< first element at the next node | ||
| 555 | Eigen::MatrixXd Jg_dx; //!< Jacobian of the dynamic constraint w.r.t dx | ||
| 556 | Eigen::MatrixXd | ||
| 557 | Jg_dxnext; //!< Jacobian of the dynamic constraint w.r.t dxnext | ||
| 558 | Eigen::MatrixXd Jg_u; //!< Jacobian of the dynamic constraint w.r.t u | ||
| 559 | Eigen::MatrixXd | ||
| 560 | Jg_ic; //!< Jacobian of the initial condition constraint w.r.t dx | ||
| 561 | Eigen::MatrixXd FxJint_dx; //!< Intermediate computation needed for Jg_ic | ||
| 562 | Eigen::VectorXd Ldx; //!< Jacobian of the cost w.r.t dx | ||
| 563 | Eigen::MatrixXd Ldxdx; //!< Hessian of the cost w.r.t dxdx | ||
| 564 | Eigen::MatrixXd Ldxu; //!< Hessian of the cost w.r.t dxu | ||
| 565 | }; | ||
| 566 | |||
| 567 | } // namespace crocoddyl | ||
| 568 | |||
| 569 | #endif | ||
| 570 |
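
For context on how this interface is typically driven, the sketch below wires an `IpoptInterface` into Ipopt's standard `IpoptApplication` loop and reads the retracted trajectory back through the getters declared above (`get_xs()`, `get_us()`, `get_cost()`). It is a minimal sketch under stated assumptions: the include path is inferred from the include guard, `buildProblem()` is a hypothetical helper standing in for however the `crocoddyl::ShootingProblem` is assembled, and the option values are arbitrary examples rather than recommended settings. Crocoddyl may also provide a higher-level solver wrapper around this interface; the TNLP is driven directly here only to make the call flow explicit.

```cpp
#include <IpIpoptApplication.hpp>

#include "crocoddyl/core/solvers/ipopt/ipopt-iface.hpp"  // path inferred from the include guard

// Hypothetical helper: builds the shooting problem elsewhere (models, horizon, x0).
std::shared_ptr<crocoddyl::ShootingProblem> buildProblem();

int main() {
  const std::shared_ptr<crocoddyl::ShootingProblem> problem = buildProblem();

  // IpoptInterface derives from Ipopt::TNLP, so it is managed through Ipopt's
  // intrusive SmartPtr rather than std::shared_ptr.
  Ipopt::SmartPtr<crocoddyl::IpoptInterface> nlp =
      new crocoddyl::IpoptInterface(problem);
  // Optional warm start: set_xs()/set_us() seed the guess that
  // get_starting_point() later hands over to Ipopt.

  Ipopt::SmartPtr<Ipopt::IpoptApplication> app = IpoptApplicationFactory();
  app->Options()->SetNumericValue("tol", 1e-8);      // example value
  app->Options()->SetIntegerValue("max_iter", 500);  // example value

  if (app->Initialize() != Ipopt::Solve_Succeeded) {
    return 1;
  }
  const Ipopt::ApplicationReturnStatus status = app->OptimizeTNLP(nlp);

  // finalize_solution() has stored the retracted states/controls in the interface.
  const std::vector<Eigen::VectorXd>& xs = nlp->get_xs();
  const std::vector<Eigen::VectorXd>& us = nlp->get_us();
  const double cost = nlp->get_cost();
  (void)xs;
  (void)us;
  (void)cost;
  return status == Ipopt::Solve_Succeeded ? 0 : 1;
}
```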
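
The class documentation fixes the decision-vector layout, `x = [dx_0, u_0, dx_1, u_1, ..., dx_N]`, together with N dynamic constraints plus one initial-condition constraint, each of dimension `ndx`. The snippet below only illustrates the bookkeeping that layout implies. It assumes, for simplicity, that every node shares the same `ndx` and `nu` (the interface keeps per-node offsets in `ixu_` precisely because this need not hold), so it is an illustration of the layout rather than a statement of what `get_nlp_info()` actually returns.

```cpp
#include <cstddef>
#include <vector>

// Bookkeeping implied by the decision-vector layout described in the class
// documentation: x = [dx_0, u_0, dx_1, u_1, ..., dx_N]. Assumes a constant
// state-increment dimension ndx and control dimension nu across all N nodes;
// the real interface stores per-node offsets (ixu_) because nu may vary.
struct NlpDimensions {
  std::size_t nvar;              // number of Ipopt variables n
  std::size_t nconst;            // number of Ipopt constraints m
  std::vector<std::size_t> ixu;  // start of the (dx_k, u_k) block of node k
};

NlpDimensions computeDimensions(const std::size_t N, const std::size_t ndx,
                                const std::size_t nu) {
  NlpDimensions dim;
  dim.nvar = (N + 1) * ndx + N * nu;  // N+1 state increments and N controls
  dim.nconst = (N + 1) * ndx;  // N dynamic constraints + 1 initial condition
  dim.ixu.resize(N + 1);
  for (std::size_t k = 0; k <= N; ++k) {
    dim.ixu[k] = k * (ndx + nu);  // dx_k starts here; u_k follows at +ndx
  }
  return dim;
}
```

For instance, with N = 20 nodes, ndx = 6, and nu = 2 this would give n = 21·6 + 20·2 = 166 variables and m = 21·6 = 126 constraints.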
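
The dynamic constraint documented above, (x⁰ₖ₊₁ ⊕ Δxₖ₊₁) ⊖ f(x⁰ₖ ⊕ Δxₖ, uₖ) = 0, maps directly onto Crocoddyl's state and action-model API. The following is a minimal sketch of that evaluation under stated assumptions, not the file's actual `eval_g()` body: the reference states `xref_k`/`xref_kp1`, the tangent-space increments `dx_k`/`dx_kp1`, and the control `u_k` are assumed to be unpacked from the Ipopt decision vector beforehand, and the temporaries mirror the `x`, `xnext`, and `x_diff` members of `IpoptInterfaceData`.

```cpp
#include "crocoddyl/core/optctrl/shooting.hpp"

// Sketch of one dynamic-constraint residual g_k following the formula in the
// class documentation; illustrative only, not the actual eval_g() code.
Eigen::VectorXd dynamicResidual(
    const std::shared_ptr<crocoddyl::ActionModelAbstract>& model,
    const std::shared_ptr<crocoddyl::ActionDataAbstract>& data,
    const Eigen::VectorXd& xref_k, const Eigen::VectorXd& dx_k,
    const Eigen::VectorXd& xref_kp1, const Eigen::VectorXd& dx_kp1,
    const Eigen::VectorXd& u_k) {
  const std::shared_ptr<crocoddyl::StateAbstract>& state = model->get_state();
  Eigen::VectorXd x_k(state->get_nx());    // x^0_k   (+) dx_k
  Eigen::VectorXd x_kp1(state->get_nx());  // x^0_k+1 (+) dx_k+1
  Eigen::VectorXd g_k(state->get_ndx());   // constraint residual

  state->integrate(xref_k, dx_k, x_k);
  state->integrate(xref_kp1, dx_kp1, x_kp1);
  model->calc(data, x_k, u_k);           // data->xnext = f(x_k, u_k)
  state->diff(data->xnext, x_kp1, g_k);  // g_k = x_k+1 (-) f(x_k, u_k)
  return g_k;
}
```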
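
`eval_jac_g()` and `eval_h()` follow Ipopt's two-phase triplet protocol spelled out in the notes above: on the structure call (`values == NULL`) only the row/column pattern is written into `iRow`/`jCol`, and on subsequent calls only the numerical values, in exactly the same order. The fragment below illustrates that protocol for a single dense block of the constraint Jacobian; the helper itself, the block placement, and the running counter `nz` are hypothetical scaffolding and do not reproduce the sparsity pattern this interface actually declares.

```cpp
#include <Eigen/Dense>

#define HAVE_CSTDDEF
#include <IpTNLP.hpp>
#undef HAVE_CSTDDEF

// Illustration of Ipopt's two-phase triplet protocol for one dense
// (nrows x ncols) block placed at (row0, col0) in the constraint Jacobian.
// `block` may be NULL on structure calls; `nz` advances through the arrays.
void fillDenseBlock(const Ipopt::Index row0, const Ipopt::Index col0,
                    const Ipopt::Index nrows, const Ipopt::Index ncols,
                    const Eigen::MatrixXd* block, Ipopt::Index* iRow,
                    Ipopt::Index* jCol, Ipopt::Number* values,
                    Ipopt::Index& nz) {
  for (Ipopt::Index i = 0; i < nrows; ++i) {
    for (Ipopt::Index j = 0; j < ncols; ++j) {
      if (values == NULL) {
        // Structure call (x and values are NULL): declare the row/column
        // pattern only (here assuming C_STYLE, i.e., 0-based indexing).
        iRow[nz] = row0 + i;
        jCol[nz] = col0 + j;
      } else {
        // Value calls (iRow and jCol are NULL): write the numbers in exactly
        // the same order as the declared structure.
        values[nz] = (*block)(i, j);
      }
      ++nz;
    }
  }
}
```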