#ifdef CROCODDYL_WITH_MULTITHREADING
#include <omp.h>
#endif  // CROCODDYL_WITH_MULTITHREADING

#include "crocoddyl/core/solver-base.hpp"
#include "crocoddyl/core/utils/exception.hpp"

namespace crocoddyl {
SolverAbstract::SolverAbstract(std::shared_ptr<ShootingProblem> problem)
    : problem_(problem) {
  // (the remaining scalar members, e.g. cost_, merit_, regularization and
  // threshold values, are initialized in the full initializer list, which is
  // elided in this excerpt)
  const std::size_t ndx = problem_->get_ndx();
  const std::size_t T = problem_->get_T();
  const std::size_t ng_T = problem_->get_terminalModel()->get_ng_T();
  xs_.resize(T + 1);
  us_.resize(T);
  fs_.resize(T + 1);
  g_adj_.resize(T + 1);
  const std::vector<std::shared_ptr<ActionModelAbstract> >& models =
      problem_->get_runningModels();
  for (std::size_t t = 0; t < T; ++t) {
    const std::shared_ptr<ActionModelAbstract>& model = models[t];
    const std::size_t nu = model->get_nu();
    const std::size_t ng = model->get_ng();
    xs_[t] = model->get_state()->zero();
    us_[t] = Eigen::VectorXd::Zero(nu);
    fs_[t] = Eigen::VectorXd::Zero(ndx);
    g_adj_[t] = Eigen::VectorXd::Zero(ng);
  }
  xs_.back() = problem_->get_terminalModel()->get_state()->zero();
  fs_.back() = Eigen::VectorXd::Zero(ndx);
  g_adj_.back() = Eigen::VectorXd::Zero(ng_T);
}

SolverAbstract::~SolverAbstract() {}
void SolverAbstract::resizeData() {
  START_PROFILER("SolverAbstract::resizeData");
  const std::size_t T = problem_->get_T();
  const std::size_t ng_T = problem_->get_terminalModel()->get_ng_T();
  const std::vector<std::shared_ptr<ActionModelAbstract> >& models =
      problem_->get_runningModels();
  for (std::size_t t = 0; t < T; ++t) {
    const std::shared_ptr<ActionModelAbstract>& model = models[t];
    const std::size_t nu = model->get_nu();
    const std::size_t ng = model->get_ng();
    us_[t].conservativeResize(nu);
    g_adj_[t].conservativeResize(ng);
  }
  g_adj_.back().conservativeResize(ng_T);
  STOP_PROFILER("SolverAbstract::resizeData");
}
double SolverAbstract::computeDynamicFeasibility() {
  tmp_feas_ = 0.;
  const std::size_t T = problem_->get_T();
  const Eigen::VectorXd& x0 = problem_->get_x0();
  const std::vector<std::shared_ptr<ActionModelAbstract> >& models =
      problem_->get_runningModels();
  const std::vector<std::shared_ptr<ActionDataAbstract> >& datas =
      problem_->get_runningDatas();

  models[0]->get_state()->diff(xs_[0], x0, fs_[0]);
#ifdef CROCODDYL_WITH_MULTITHREADING
#pragma omp parallel for num_threads(problem_->get_nthreads())
#endif
  for (std::size_t t = 0; t < T; ++t) {
    const std::shared_ptr<ActionModelAbstract>& m = models[t];
    const std::shared_ptr<ActionDataAbstract>& d = datas[t];
    m->get_state()->diff(xs_[t + 1], d->xnext, fs_[t + 1]);
  }
  switch (feasnorm_) {
    case LInf:
      tmp_feas_ = std::max(tmp_feas_, fs_[0].lpNorm<Eigen::Infinity>());
      for (std::size_t t = 0; t < T; ++t) {
        tmp_feas_ = std::max(tmp_feas_, fs_[t + 1].lpNorm<Eigen::Infinity>());
      }
      break;
    case L1:
      tmp_feas_ = fs_[0].lpNorm<1>();
      for (std::size_t t = 0; t < T; ++t) {
        tmp_feas_ += fs_[t + 1].lpNorm<1>();
      }
      break;
  }
  return tmp_feas_;
}
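// In computeDynamicFeasibility(), fs_[0] holds the gap between the candidate
// initial state xs_[0] and the problem's x0, while fs_[t + 1] holds the defect
// between the candidate xs_[t + 1] and the predicted state d->xnext; both are
// expressed in the state's tangent space through StateAbstract::diff and then
// aggregated with the infinity norm or the 1-norm selected by feasnorm_.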
double SolverAbstract::computeInequalityFeasibility() {
  tmp_feas_ = 0.;
  const std::size_t T = problem_->get_T();
  const std::vector<std::shared_ptr<ActionModelAbstract> >& models =
      problem_->get_runningModels();
  const std::vector<std::shared_ptr<ActionDataAbstract> >& datas =
      problem_->get_runningDatas();
  switch (feasnorm_) {
    case LInf:
      for (std::size_t t = 0; t < T; ++t) {
        if (models[t]->get_ng() > 0) {
          g_adj_[t] = datas[t]
                          ->g.cwiseMax(models[t]->get_g_lb())
                          .cwiseMin(models[t]->get_g_ub());
          tmp_feas_ = std::max(
              tmp_feas_, (datas[t]->g - g_adj_[t]).lpNorm<Eigen::Infinity>());
        }
      }
      if (problem_->get_terminalModel()->get_ng_T() > 0) {
        g_adj_.back() =
            problem_->get_terminalData()
                ->g.cwiseMax(problem_->get_terminalModel()->get_g_lb())
                .cwiseMin(problem_->get_terminalModel()->get_g_ub());
        tmp_feas_ = std::max(tmp_feas_,
                             (problem_->get_terminalData()->g - g_adj_.back())
                                 .lpNorm<Eigen::Infinity>());
      }
      break;
    case L1:
      for (std::size_t t = 0; t < T; ++t) {
        if (models[t]->get_ng() > 0) {
          g_adj_[t] = datas[t]
                          ->g.cwiseMax(models[t]->get_g_lb())
                          .cwiseMin(models[t]->get_g_ub());
          tmp_feas_ += (datas[t]->g - g_adj_[t]).lpNorm<1>();
        }
      }
      if (problem_->get_terminalModel()->get_ng_T() > 0) {
        g_adj_.back() =
            problem_->get_terminalData()
                ->g.cwiseMax(problem_->get_terminalModel()->get_g_lb())
                .cwiseMin(problem_->get_terminalModel()->get_g_ub());
        tmp_feas_ += (problem_->get_terminalData()->g - g_adj_.back()).lpNorm<1>();
      }
      break;
  }
  return tmp_feas_;
}
double SolverAbstract::computeEqualityFeasibility() {
  tmp_feas_ = 0.;
  const std::size_t T = problem_->get_T();
  const std::vector<std::shared_ptr<ActionModelAbstract> >& models =
      problem_->get_runningModels();
  const std::vector<std::shared_ptr<ActionDataAbstract> >& datas =
      problem_->get_runningDatas();
  switch (feasnorm_) {
    case LInf:
      for (std::size_t t = 0; t < T; ++t) {
        if (models[t]->get_nh() > 0) {
          tmp_feas_ = std::max(tmp_feas_, datas[t]->h.lpNorm<Eigen::Infinity>());
        }
      }
      if (problem_->get_terminalModel()->get_nh_T() > 0) {
        tmp_feas_ = std::max(
            tmp_feas_, problem_->get_terminalData()->h.lpNorm<Eigen::Infinity>());
      }
      break;
    case L1:
      for (std::size_t t = 0; t < T; ++t) {
        if (models[t]->get_nh() > 0) {
          tmp_feas_ += datas[t]->h.lpNorm<1>();
        }
      }
      if (problem_->get_terminalModel()->get_nh_T() > 0) {
        tmp_feas_ += problem_->get_terminalData()->h.lpNorm<1>();
      }
      break;
  }
  return tmp_feas_;
}
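// These three helpers populate the solver's feasibility measures: derived
// solvers typically store computeDynamicFeasibility() in ffeas_,
// computeInequalityFeasibility() in gfeas_ and computeEqualityFeasibility()
// in hfeas_, and combine them into the total feasibility feas_. The clamping
// g_adj_ = clamp(g, g_lb, g_ub) above makes (g - g_adj_) a direct measure of
// how far each inequality constraint lies outside its bounds.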
void SolverAbstract::setCandidate(const std::vector<Eigen::VectorXd>& xs_warm,
                                  const std::vector<Eigen::VectorXd>& us_warm,
                                  const bool is_feasible) {
  const std::size_t T = problem_->get_T();
  const std::vector<std::shared_ptr<ActionModelAbstract> >& models =
      problem_->get_runningModels();
  if (xs_warm.size() == 0) {
    for (std::size_t t = 0; t < T; ++t) {
      const std::shared_ptr<ActionModelAbstract>& model = models[t];
      xs_[t] = model->get_state()->zero();
    }
    xs_.back() = problem_->get_terminalModel()->get_state()->zero();
  } else {
    if (xs_warm.size() != T + 1) {
      throw_pretty("Warm start state vector has wrong dimension, got "
                   << xs_warm.size() << " expecting " << (T + 1));
    }
    for (std::size_t t = 0; t < T; ++t) {
      const std::size_t nx = models[t]->get_state()->get_nx();
      if (static_cast<std::size_t>(xs_warm[t].size()) != nx) {
        throw_pretty("Invalid argument: "
                     << "xs_init[" + std::to_string(t) +
                            "] has wrong dimension (" << xs_warm[t].size()
                     << " provided - it should be equal to " +
                            std::to_string(nx) + "). ActionModel: "
                     << *models[t]);
      }
    }
    const std::size_t nx =
        problem_->get_terminalModel()->get_state()->get_nx();
    if (static_cast<std::size_t>(xs_warm[T].size()) != nx) {
      throw_pretty("Invalid argument: "
                   << "xs_init[" + std::to_string(T) +
                          "] (terminal state) has wrong dimension ("
                   << xs_warm[T].size()
                   << " provided - it should be equal to " +
                          std::to_string(nx) + "). ActionModel: "
                   << *problem_->get_terminalModel());
    }
    std::copy(xs_warm.begin(), xs_warm.end(), xs_.begin());
  }

  if (us_warm.size() == 0) {
    for (std::size_t t = 0; t < T; ++t) {
      const std::shared_ptr<ActionModelAbstract>& model = models[t];
      const std::size_t nu = model->get_nu();
      us_[t] = Eigen::VectorXd::Zero(nu);
    }
  } else {
    if (us_warm.size() != T) {
      throw_pretty("Warm start control has wrong dimension, got "
                   << us_warm.size() << " expecting " << T);
    }
    for (std::size_t t = 0; t < T; ++t) {
      const std::shared_ptr<ActionModelAbstract>& model = models[t];
      const std::size_t nu = model->get_nu();
      if (static_cast<std::size_t>(us_warm[t].size()) != nu) {
        throw_pretty("Invalid argument: "
                     << "us_init[" + std::to_string(t) +
                            "] has wrong dimension (" << us_warm[t].size()
                     << " provided - it should be equal to " +
                            std::to_string(nu) + "). ActionModel: " << *model);
      }
    }
    std::copy(us_warm.begin(), us_warm.end(), us_.begin());
  }
  is_feasible_ = is_feasible;
}
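// A warm start therefore has to provide T + 1 states and T controls whose
// dimensions match the corresponding action models; empty vectors reset the
// candidate to each node's zero state and to zero controls.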
void SolverAbstract::setCallbacks(
    const std::vector<std::shared_ptr<CallbackAbstract> >& callbacks) {
  callbacks_ = callbacks;
}

const std::vector<std::shared_ptr<CallbackAbstract> >&
SolverAbstract::getCallbacks() const {
  return callbacks_;
}

// ... (simple getters elided)
DEPRECATED("Use get_preg for getting the primal-dual regularization",
           double SolverAbstract::get_xreg() const { return preg_; })

DEPRECATED("Use get_preg for getting the primal-dual regularization",
           double SolverAbstract::get_ureg() const { return preg_; })
void SolverAbstract::set_xs(const std::vector<Eigen::VectorXd>& xs) {
  const std::size_t T = problem_->get_T();
  if (xs.size() != T + 1) {
    throw_pretty("Invalid argument: "
                 << "xs list has to be of length " + std::to_string(T + 1));
  }
  const std::size_t nx = problem_->get_nx();
  for (std::size_t t = 0; t < T; ++t) {
    if (static_cast<std::size_t>(xs[t].size()) != nx) {
      throw_pretty("Invalid argument: "
                   << "xs[" + std::to_string(t) + "] has wrong dimension ("
                   << xs[t].size()
                   << " provided - it should be " + std::to_string(nx) + ")");
    }
  }
  if (static_cast<std::size_t>(xs[T].size()) != nx) {
    throw_pretty("Invalid argument: "
                 << "xs[" + std::to_string(T) +
                        "] (terminal state) has wrong dimension ("
                 << xs[T].size()
                 << " provided - it should be " + std::to_string(nx) + ")");
  }
  xs_ = xs;
}
void SolverAbstract::set_us(const std::vector<Eigen::VectorXd>& us) {
  const std::size_t T = problem_->get_T();
  if (us.size() != T) {
    throw_pretty("Invalid argument: "
                 << "us list has to be of length " + std::to_string(T));
  }
  const std::vector<std::shared_ptr<ActionModelAbstract> >& models =
      problem_->get_runningModels();
  for (std::size_t t = 0; t < T; ++t) {
    const std::shared_ptr<ActionModelAbstract>& model = models[t];
    const std::size_t nu = model->get_nu();
    if (static_cast<std::size_t>(us[t].size()) != nu) {
      throw_pretty("Invalid argument: "
                   << "us[" + std::to_string(t) + "] has wrong dimension ("
                   << us[t].size()
                   << " provided - it should be " + std::to_string(nu) + ")");
    }
  }
  us_ = us;
}
void SolverAbstract::set_preg(const double preg) {
  if (preg < 0.) {
    throw_pretty("Invalid argument: " << "preg value has to be positive.");
  }
  preg_ = preg;
}

void SolverAbstract::set_dreg(const double dreg) {
  if (dreg < 0.) {
    throw_pretty("Invalid argument: " << "dreg value has to be positive.");
  }
  dreg_ = dreg;
}

DEPRECATED("Use set_preg for setting the primal-variable regularization",
           void SolverAbstract::set_xreg(const double xreg) {
             if (xreg < 0.) {
               throw_pretty("Invalid argument: "
                            << "xreg value has to be positive.");
             }
             preg_ = xreg;
           })

DEPRECATED("Use set_preg for setting the primal-variable regularization",
           void SolverAbstract::set_ureg(const double ureg) {
             if (ureg < 0.) {
               throw_pretty("Invalid argument: "
                            << "ureg value has to be positive.");
             }
             preg_ = ureg;
           })

void SolverAbstract::set_th_acceptstep(const double th_acceptstep) {
  if (0. >= th_acceptstep || th_acceptstep > 1) {
    throw_pretty("Invalid argument: "
                 << "th_acceptstep value should be between 0 and 1.");
  }
  th_acceptstep_ = th_acceptstep;
}

void SolverAbstract::set_th_stop(const double th_stop) {
  if (th_stop <= 0.) {
    throw_pretty("Invalid argument: "
                 << "th_stop value has to be higher than 0.");
  }
  th_stop_ = th_stop;
}

void SolverAbstract::set_th_gaptol(const double th_gaptol) {
  if (0. > th_gaptol) {
    throw_pretty("Invalid argument: "
                 << "th_gaptol value has to be positive.");
  }
  th_gaptol_ = th_gaptol;
}

// Helper used by the solvers to abort a trial step on NaN, infinite, or
// numerically diverging values.
bool raiseIfNaN(const double value) {
  if (std::isnan(value) || std::isinf(value) || value >= 1e30) {
    return true;
  } else {
    return false;
  }
}

}  // namespace crocoddyl
double get_cost() const
Return the cost for the current guess.
std::vector< Eigen::VectorXd > g_adj_
Adjusted inequality bound.
double get_dPhi() const
Return the reduction in the merit function.
double get_th_gaptol() const
Return the threshold for accepting a gap as non-zero.
double dVexp_
Expected reduction in the cost function.
std::vector< Eigen::VectorXd > xs_
State trajectory.
std::size_t get_iter() const
Return the number of iterations performed by the solver.
double get_hfeas() const
Return the equality feasibility for the current guess.
void set_th_stop(const double th_stop)
Modify the tolerance for stopping the algorithm.
double stop_
Value computed by stoppingCriteria()
void set_xs(const std::vector< Eigen::VectorXd > &xs)
Modify the state trajectory.
double get_dVexp() const
Return the expected reduction in the cost function.
double dreg_
Current dual-variable regularization value.
double feas_
Total feasibility for the current guess.
bool is_feasible_
Flag that indicates whether the iteration is feasible.
EIGEN_MAKE_ALIGNED_OPERATOR_NEW SolverAbstract(std::shared_ptr< ShootingProblem > problem)
Initialize the solver.
std::shared_ptr< ShootingProblem > problem_
Optimal control problem.
std::vector< Eigen::VectorXd > us_
Control trajectory.
double get_dPhiexp() const
Return the expected reduction in the merit function.
double th_acceptstep_
Threshold used for accepting step.
double get_steplength() const
Return the step length.
void set_th_gaptol(const double th_gaptol)
Modify the threshold for accepting a gap as non-zero.
double get_merit() const
Return the merit for the current guess.
double dPhi_
Reduction in the merit function computed by tryStep()
const std::vector< std::shared_ptr< CallbackAbstract > > & getCallbacks() const
Return the list of callback functions used for diagnostics.
double computeInequalityFeasibility()
Compute the feasibility of the inequality constraints for the current guess.
double get_preg() const
Return the primal-variable regularization.
double get_hfeas_try() const
Return the equality feasibility for the current step length.
double th_stop_
Tolerance for stopping the algorithm.
double computeDynamicFeasibility()
Compute the dynamic feasibility for the current guess.
const Eigen::Vector2d & get_d() const
Return the linear and quadratic terms of the expected improvement.
double dPhiexp_
Expected reduction in the merit function.
enum FeasibilityNorm feasnorm_
void setCandidate(const std::vector< Eigen::VectorXd > &xs_warm=DEFAULT_VECTOR, const std::vector< Eigen::VectorXd > &us_warm=DEFAULT_VECTOR, const bool is_feasible=false)
Set the solver candidate trajectories.
double get_th_stop() const
Return the tolerance for stopping the algorithm.
double dfeas_
Reduction in the feasibility.
void set_feasnorm(const FeasibilityNorm feas_norm)
Modify the norm used to compute the dynamic and constraint feasibility.
double get_ffeas() const
Return the dynamic feasibility for the current guess.
double get_gfeas() const
Return the inequality feasibility for the current guess.
double cost_
Cost for the current guess.
std::vector< std::shared_ptr< CallbackAbstract > > callbacks_
Callback functions.
void set_us(const std::vector< Eigen::VectorXd > &us)
Modify the control trajectory.
void setCallbacks(const std::vector< std::shared_ptr< CallbackAbstract > > &callbacks)
Set the list of callback functions used for solver diagnostics.
double th_gaptol_
Threshold limit to check non-zero gaps.
std::size_t iter_
Number of iterations performed by the solver.
double get_feas() const
Return the total feasibility for the current guess.
double dV_
Reduction in the cost function computed by tryStep()
double get_stop() const
Return the stopping-criteria value computed by stoppingCriteria()
double get_ffeas_try() const
Return the dynamic feasibility for the current step length.
void set_dreg(const double dreg)
Modify the dual-variable regularization value.
Eigen::Vector2d d_
LQ approximation of the expected improvement.
double get_dV() const
Return the reduction in the cost function.
const std::vector< Eigen::VectorXd > & get_fs() const
Return the dynamic infeasibility.
const std::vector< Eigen::VectorXd > & get_xs() const
Return the state trajectory.
double get_dfeas() const
Return the reduction in the feasibility.
void set_preg(const double preg)
Modify the primal-variable regularization value.
double ffeas_
Feasibility of the dynamic constraints for the current guess.
double get_gfeas_try() const
Return the inequality feasibility for the current step length.
bool get_is_feasible() const
Return the feasibility status of the trajectory.
double preg_
Current primal-variable regularization value.
const std::vector< Eigen::VectorXd > & get_us() const
Return the control trajectory.
double merit_
Merit for the current guess.
virtual void resizeData()
Resizing the solver data.
std::vector< Eigen::VectorXd > fs_
Gaps/defects between shooting nodes.
void set_th_acceptstep(const double th_acceptstep)
Modify the threshold used for accepting step.
const std::shared_ptr< ShootingProblem > & get_problem() const
Return the shooting problem.
double tmp_feas_
Temporary variable used to compute the feasibility.
double get_th_acceptstep() const
Return the threshold used for accepting a step.
FeasibilityNorm get_feasnorm() const
Return the type of norm used to evaluate the dynamic and constraints feasibility.
double get_dreg() const
Return the dual-variable regularization.
double computeEqualityFeasibility()
Compute the feasibility of the equality constraints for the current guess.
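A minimal usage sketch of this interface follows. It assumes a user-provided buildProblem() helper returning a std::shared_ptr<ShootingProblem> and uses SolverDDP as one concrete SolverAbstract implementation; it also assumes all running models share the same control dimension. It exercises the members documented above (setCallbacks, set_th_stop, the warm-start contract, and the solution getters) rather than prescribing how a solver must be driven.

#include <iostream>
#include <memory>
#include <vector>

#include "crocoddyl/core/solvers/ddp.hpp"
#include "crocoddyl/core/utils/callbacks.hpp"

std::shared_ptr<crocoddyl::ShootingProblem> buildProblem();  // assumed helper

int main() {
  std::shared_ptr<crocoddyl::ShootingProblem> problem = buildProblem();
  crocoddyl::SolverDDP solver(problem);

  // Diagnostic callbacks and stopping tolerance (SolverAbstract API)
  std::vector<std::shared_ptr<crocoddyl::CallbackAbstract> > cbs;
  cbs.push_back(std::make_shared<crocoddyl::CallbackVerbose>());
  solver.setCallbacks(cbs);
  solver.set_th_stop(1e-9);

  // Warm start: T + 1 states and T controls, as required by setCandidate
  // (empty vectors fall back to zero states and zero controls)
  const std::size_t T = problem->get_T();
  std::vector<Eigen::VectorXd> xs(T + 1, problem->get_x0());
  std::vector<Eigen::VectorXd> us(T, Eigen::VectorXd::Zero(problem->get_nu_max()));
  solver.solve(xs, us, 100);

  // Inspect the solution through the getters documented above
  std::cout << "iterations: " << solver.get_iter()
            << ", cost: " << solver.get_cost()
            << ", dynamic feasibility: " << solver.get_ffeas() << std::endl;
  return 0;
}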