crocoddyl  1.9.0
Contact RObot COntrol by Differential DYnamic programming Library (Crocoddyl)
ddp.cpp
1 // BSD 3-Clause License
3 //
4 // Copyright (C) 2019-2021, LAAS-CNRS, University of Edinburgh, University of Oxford
5 // Copyright note valid unless otherwise stated in individual files.
6 // All rights reserved.
8 
9 #include <iostream>
10 
11 #include "crocoddyl/core/solvers/ddp.hpp"
12 #include "crocoddyl/core/utils/exception.hpp"
13 
14 namespace crocoddyl {
15 
16 SolverDDP::SolverDDP(boost::shared_ptr<ShootingProblem> problem)
17  : SolverAbstract(problem),
18  reg_incfactor_(10.),
19  reg_decfactor_(10.),
20  reg_min_(1e-9),
21  reg_max_(1e9),
22  cost_try_(0.),
23  th_grad_(1e-12),
24  th_stepdec_(0.5),
25  th_stepinc_(0.01) {
26  allocateData();
27 
28  const std::size_t n_alphas = 10;
29  alphas_.resize(n_alphas);
30  for (std::size_t n = 0; n < n_alphas; ++n) {
31  alphas_[n] = 1. / pow(2., static_cast<double>(n));
32  }
33  if (th_stepinc_ < alphas_[n_alphas - 1]) {
34  th_stepinc_ = alphas_[n_alphas - 1];
35  std::cerr << "Warning: th_stepinc is lower than the lowest alpha value; setting it to "
36  << std::to_string(alphas_[n_alphas - 1]) << std::endl;
37  }
38 }
39 
40 SolverDDP::~SolverDDP() {}
41 
42 bool SolverDDP::solve(const std::vector<Eigen::VectorXd>& init_xs, const std::vector<Eigen::VectorXd>& init_us,
43  const std::size_t maxiter, const bool is_feasible, const double reginit) {
44  START_PROFILER("SolverDDP::solve");
45  if (problem_->is_updated()) {
46  resizeData();
47  }
48  xs_try_[0] = problem_->get_x0();  // needed in case init_xs[0] is infeasible
49  setCandidate(init_xs, init_us, is_feasible);
50 
51  if (std::isnan(reginit)) {
52  xreg_ = reg_min_;
53  ureg_ = reg_min_;
54  } else {
55  xreg_ = reginit;
56  ureg_ = reginit;
57  }
58  was_feasible_ = false;
59 
60  bool recalcDiff = true;
61  for (iter_ = 0; iter_ < maxiter; ++iter_) {
62  while (true) {
63  try {
64  computeDirection(recalcDiff);
65  } catch (std::exception& e) {
66  recalcDiff = false;
67  increaseRegularization();
68  if (xreg_ == reg_max_) {
69  return false;
70  } else {
71  continue;
72  }
73  }
74  break;
75  }
76  expectedImprovement();
77 
78  // We need to recalculate the derivatives when the step length passes
79  recalcDiff = false;
80  for (std::vector<double>::const_iterator it = alphas_.begin(); it != alphas_.end(); ++it) {
81  steplength_ = *it;
82 
83  try {
84  dV_ = tryStep(steplength_);
85  } catch (std::exception& e) {
86  continue;
87  }
88  dVexp_ = steplength_ * (d_[0] + 0.5 * steplength_ * d_[1]);
89 
90  if (dVexp_ >= 0) {  // descent direction
91  if (d_[0] < th_grad_ || !is_feasible_ || dV_ > th_acceptstep_ * dVexp_) {
92  was_feasible_ = is_feasible_;
93  setCandidate(xs_try_, us_try_, true);
94  cost_ = cost_try_;
95  recalcDiff = true;
96  break;
97  }
98  }
99  }
100 
101  if (steplength_ > th_stepdec_) {
102  decreaseRegularization();
103  }
104  if (steplength_ <= th_stepinc_) {
105  increaseRegularization();
106  if (xreg_ == reg_max_) {
107  STOP_PROFILER("SolverDDP::solve");
108  return false;
109  }
110  }
111  stoppingCriteria();
112 
113  const std::size_t n_callbacks = callbacks_.size();
114  for (std::size_t c = 0; c < n_callbacks; ++c) {
115  CallbackAbstract& callback = *callbacks_[c];
116  callback(*this);
117  }
118 
119  if (was_feasible_ && stop_ < th_stop_) {
120  STOP_PROFILER("SolverDDP::solve");
121  return true;
122  }
123  }
124  STOP_PROFILER("SolverDDP::solve");
125  return false;
126 }
127 
128 void SolverDDP::computeDirection(const bool recalcDiff) {
129  START_PROFILER("SolverDDP::computeDirection");
130  if (recalcDiff) {
131  calcDiff();
132  }
133  backwardPass();
134  STOP_PROFILER("SolverDDP::computeDirection");
135 }
136 
137 double SolverDDP::tryStep(const double steplength) {
138  START_PROFILER("SolverDDP::tryStep");
139  forwardPass(steplength);
140  STOP_PROFILER("SolverDDP::tryStep");
141  return cost_ - cost_try_;
142 }
143 
144 double SolverDDP::stoppingCriteria() {
145  stop_ = 0.;
146  const std::size_t T = this->problem_->get_T();
147  const std::vector<boost::shared_ptr<ActionModelAbstract> >& models = problem_->get_runningModels();
148 
149  for (std::size_t t = 0; t < T; ++t) {
150  const std::size_t nu = models[t]->get_nu();
151  if (nu != 0) {
152  stop_ += Qu_[t].squaredNorm();
153  }
154  }
155  return stop_;
156 }
157 
158 const Eigen::Vector2d& SolverDDP::expectedImprovement() {
159  d_.fill(0);
160  const std::size_t T = this->problem_->get_T();
161  const std::vector<boost::shared_ptr<ActionModelAbstract> >& models = problem_->get_runningModels();
162  for (std::size_t t = 0; t < T; ++t) {
163  const std::size_t nu = models[t]->get_nu();
164  if (nu != 0) {
165  d_[0] += Qu_[t].dot(k_[t]);
166  d_[1] -= k_[t].dot(Quuk_[t]);
167  }
168  }
169  return d_;
170 }
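// The two sums above define the quadratic model of the expected cost change
// used by the line search in solve(): d_[0] = sum_t Qu_t . k_t and
// d_[1] = -sum_t k_t . (Quu_t k_t), so that for a step length alpha the
// expected improvement is dVexp_ = alpha * (d_[0] + 0.5 * alpha * d_[1]).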
171 
172 void SolverDDP::resizeData() {
173  START_PROFILER("SolverDDP::resizeData");
174  SolverAbstract::resizeData();
175 
176  const std::size_t T = problem_->get_T();
177  const std::size_t ndx = problem_->get_ndx();
178  const std::vector<boost::shared_ptr<ActionModelAbstract> >& models = problem_->get_runningModels();
179  for (std::size_t t = 0; t < T; ++t) {
180  const boost::shared_ptr<ActionModelAbstract>& model = models[t];
181  const std::size_t nu = model->get_nu();
182  Qxu_[t].conservativeResize(ndx, nu);
183  Quu_[t].conservativeResize(nu, nu);
184  Qu_[t].conservativeResize(nu);
185  K_[t].conservativeResize(nu, ndx);
186  k_[t].conservativeResize(nu);
187  us_try_[t].conservativeResize(nu);
188  FuTVxx_p_[t].conservativeResize(nu, ndx);
189  Quuk_[t].conservativeResize(nu);
190  }
191  STOP_PROFILER("SolverDDP::resizeData");
192 }
193 
194 double SolverDDP::calcDiff() {
195  START_PROFILER("SolverDDP::calcDiff");
196  if (iter_ == 0) {
197  problem_->calc(xs_, us_);
198  }
199  cost_ = problem_->calcDiff(xs_, us_);
200 
201  ffeas_ = computeDynamicFeasibility();
202  STOP_PROFILER("SolverDDP::calcDiff");
203  return cost_;
204 }
205 
206 void SolverDDP::backwardPass() {
207  START_PROFILER("SolverDDP::backwardPass");
208  const boost::shared_ptr<ActionDataAbstract>& d_T = problem_->get_terminalData();
209  Vxx_.back() = d_T->Lxx;
210  Vx_.back() = d_T->Lx;
211 
212  if (!std::isnan(xreg_)) {
213  Vxx_.back().diagonal().array() += xreg_;
214  }
215 
216  if (!is_feasible_) {
217  Vx_.back().noalias() += Vxx_.back() * fs_.back();
218  }
219  const std::vector<boost::shared_ptr<ActionModelAbstract> >& models = problem_->get_runningModels();
220  const std::vector<boost::shared_ptr<ActionDataAbstract> >& datas = problem_->get_runningDatas();
221  for (int t = static_cast<int>(problem_->get_T()) - 1; t >= 0; --t) {
222  const boost::shared_ptr<ActionModelAbstract>& m = models[t];
223  const boost::shared_ptr<ActionDataAbstract>& d = datas[t];
224  const Eigen::MatrixXd& Vxx_p = Vxx_[t + 1];
225  const Eigen::VectorXd& Vx_p = Vx_[t + 1];
226  const std::size_t nu = m->get_nu();
227 
228  FxTVxx_p_.noalias() = d->Fx.transpose() * Vxx_p;
229  START_PROFILER("SolverDDP::Qx");
230  Qx_[t] = d->Lx;
231  Qx_[t].noalias() += d->Fx.transpose() * Vx_p;
232  STOP_PROFILER("SolverDDP::Qx");
233  START_PROFILER("SolverDDP::Qxx");
234  Qxx_[t] = d->Lxx;
235  Qxx_[t].noalias() += FxTVxx_p_ * d->Fx;
236  STOP_PROFILER("SolverDDP::Qxx");
237  if (nu != 0) {
238  FuTVxx_p_[t].noalias() = d->Fu.transpose() * Vxx_p;
239  START_PROFILER("SolverDDP::Qu");
240  Qu_[t] = d->Lu;
241  Qu_[t].noalias() += d->Fu.transpose() * Vx_p;
242  STOP_PROFILER("SolverDDP::Qu");
243  START_PROFILER("SolverDDP::Quu");
244  Quu_[t] = d->Luu;
245  Quu_[t].noalias() += FuTVxx_p_[t] * d->Fu;
246  STOP_PROFILER("SolverDDP::Quu");
247  START_PROFILER("SolverDDP::Qxu");
248  Qxu_[t] = d->Lxu;
249  Qxu_[t].noalias() += FxTVxx_p_ * d->Fu;
250  STOP_PROFILER("SolverDDP::Qxu");
251  if (!std::isnan(ureg_)) {
252  Quu_[t].diagonal().array() += ureg_;
253  }
254  }
255 
256  computeGains(t);
257 
258  Vx_[t] = Qx_[t];
259  Vxx_[t] = Qxx_[t];
260  if (nu != 0) {
261  Quuk_[t].noalias() = Quu_[t] * k_[t];
262  Vx_[t].noalias() -= K_[t].transpose() * Qu_[t];
263  START_PROFILER("SolverDDP::Vxx");
264  Vxx_[t].noalias() -= Qxu_[t] * K_[t];
265  STOP_PROFILER("SolverDDP::Vxx");
266  }
267  Vxx_tmp_ = 0.5 * (Vxx_[t] + Vxx_[t].transpose());
268  Vxx_[t] = Vxx_tmp_;
269 
270  if (!std::isnan(xreg_)) {
271  Vxx_[t].diagonal().array() += xreg_;
272  }
273 
274  // Compute and store the Vx gradient at end of the interval (rollout state)
275  if (!is_feasible_) {
276  Vx_[t].noalias() += Vxx_[t] * fs_[t];
277  }
278 
279  if (raiseIfNaN(Vx_[t].lpNorm<Eigen::Infinity>())) {
280  throw_pretty("backward_error");
281  }
282  if (raiseIfNaN(Vxx_[t].lpNorm<Eigen::Infinity>())) {
283  throw_pretty("backward_error");
284  }
285  }
286  STOP_PROFILER("SolverDDP::backwardPass");
287 }
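// The loop above is the standard Riccati sweep. With Vx', Vxx' denoting the
// value-function terms of the next node (xreg_ already added to the diagonal
// of Vxx'), each running node computes
//   Qx  = Lx  + Fx^T Vx',    Qu  = Lu  + Fu^T Vx',
//   Qxx = Lxx + Fx^T Vxx' Fx,   Quu = Luu + Fu^T Vxx' Fu (+ ureg_ I),   Qxu = Lxu + Fx^T Vxx' Fu,
// then the gains via computeGains(), and finally
//   Vx  = Qx - K^T Qu,   Vxx = Qxx - Qxu K (symmetrized),
// adding the gap term Vxx * fs to Vx when the current guess is infeasible.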
288 
289 void SolverDDP::forwardPass(const double steplength) {
290  if (steplength > 1. || steplength < 0.) {
291  throw_pretty("Invalid argument: "
292  << "invalid step length, value should be between 0 and 1.");
293  }
294  START_PROFILER("SolverDDP::forwardPass");
295  cost_try_ = 0.;
296  const std::size_t T = problem_->get_T();
297  const std::vector<boost::shared_ptr<ActionModelAbstract> >& models = problem_->get_runningModels();
298  const std::vector<boost::shared_ptr<ActionDataAbstract> >& datas = problem_->get_runningDatas();
299  for (std::size_t t = 0; t < T; ++t) {
300  const boost::shared_ptr<ActionModelAbstract>& m = models[t];
301  const boost::shared_ptr<ActionDataAbstract>& d = datas[t];
302 
303  m->get_state()->diff(xs_[t], xs_try_[t], dx_[t]);
304  if (m->get_nu() != 0) {
305  us_try_[t].noalias() = us_[t];
306  us_try_[t].noalias() -= k_[t] * steplength;
307  us_try_[t].noalias() -= K_[t] * dx_[t];
308  m->calc(d, xs_try_[t], us_try_[t]);
309  } else {
310  m->calc(d, xs_try_[t]);
311  }
312  xs_try_[t + 1] = d->xnext;
313  cost_try_ += d->cost;
314 
315  if (raiseIfNaN(cost_try_)) {
316  STOP_PROFILER("SolverDDP::forwardPass");
317  throw_pretty("forward_error");
318  }
319  if (raiseIfNaN(xs_try_[t + 1].lpNorm<Eigen::Infinity>())) {
320  STOP_PROFILER("SolverDDP::forwardPass");
321  throw_pretty("forward_error");
322  }
323  }
324 
325  const boost::shared_ptr<ActionModelAbstract>& m = problem_->get_terminalModel();
326  const boost::shared_ptr<ActionDataAbstract>& d = problem_->get_terminalData();
327  m->calc(d, xs_try_.back());
328  cost_try_ += d->cost;
329 
330  if (raiseIfNaN(cost_try_)) {
331  STOP_PROFILER("SolverDDP::forwardPass");
332  throw_pretty("forward_error");
333  }
334  STOP_PROFILER("SolverDDP::forwardPass");
335 }
336 
337 void SolverDDP::computeGains(const std::size_t t) {
338  START_PROFILER("SolverDDP::computeGains");
339  const std::size_t nu = problem_->get_runningModels()[t]->get_nu();
340  if (nu > 0) {
341  START_PROFILER("SolverDDP::Quu_inv");
342  Quu_llt_[t].compute(Quu_[t]);
343  STOP_PROFILER("SolverDDP::Quu_inv");
344  const Eigen::ComputationInfo& info = Quu_llt_[t].info();
345  if (info != Eigen::Success) {
346  STOP_PROFILER("SolverDDP::computeGains");
347  throw_pretty("backward_error");
348  }
349  K_[t] = Qxu_[t].transpose();
350 
351  START_PROFILER("SolverDDP::Quu_inv_Qux");
352  Quu_llt_[t].solveInPlace(K_[t]);
353  STOP_PROFILER("SolverDDP::Quu_inv_Qux");
354  k_[t] = Qu_[t];
355  Quu_llt_[t].solveInPlace(k_[t]);
356  }
357  STOP_PROFILER("SolverDDP::computeGains");
358 }
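// computeGains() factors Quu with a Cholesky (LLT) decomposition and solves
// Quu K = Qxu^T and Quu k = Qu in place. The resulting feedback/feedforward
// terms define the local control law applied in forwardPass():
//   u_try = u - steplength * k - K * (x_try - x).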
359 
360 void SolverDDP::increaseRegularization() {
361  xreg_ *= reg_incfactor_;
362  if (xreg_ > reg_max_) {
363  xreg_ = reg_max_;
364  }
365  ureg_ = xreg_;
366 }
367 
368 void SolverDDP::decreaseRegularization() {
369  xreg_ /= reg_decfactor_;
370  if (xreg_ < reg_min_) {
371  xreg_ = reg_min_;
372  }
373  ureg_ = xreg_;
374 }
375 
376 void SolverDDP::allocateData() {
377  const std::size_t T = problem_->get_T();
378  Vxx_.resize(T + 1);
379  Vx_.resize(T + 1);
380  Qxx_.resize(T);
381  Qxu_.resize(T);
382  Quu_.resize(T);
383  Qx_.resize(T);
384  Qu_.resize(T);
385  K_.resize(T);
386  k_.resize(T);
387 
388  xs_try_.resize(T + 1);
389  us_try_.resize(T);
390  dx_.resize(T);
391 
392  FuTVxx_p_.resize(T);
393  Quu_llt_.resize(T);
394  Quuk_.resize(T);
395 
396  const std::size_t ndx = problem_->get_ndx();
397  const std::vector<boost::shared_ptr<ActionModelAbstract> >& models = problem_->get_runningModels();
398  for (std::size_t t = 0; t < T; ++t) {
399  const boost::shared_ptr<ActionModelAbstract>& model = models[t];
400  const std::size_t nu = model->get_nu();
401  Vxx_[t] = Eigen::MatrixXd::Zero(ndx, ndx);
402  Vx_[t] = Eigen::VectorXd::Zero(ndx);
403  Qxx_[t] = Eigen::MatrixXd::Zero(ndx, ndx);
404  Qxu_[t] = Eigen::MatrixXd::Zero(ndx, nu);
405  Quu_[t] = Eigen::MatrixXd::Zero(nu, nu);
406  Qx_[t] = Eigen::VectorXd::Zero(ndx);
407  Qu_[t] = Eigen::VectorXd::Zero(nu);
408  K_[t] = MatrixXdRowMajor::Zero(nu, ndx);
409  k_[t] = Eigen::VectorXd::Zero(nu);
410 
411  if (t == 0) {
412  xs_try_[t] = problem_->get_x0();
413  } else {
414  xs_try_[t] = model->get_state()->zero();
415  }
416  us_try_[t] = Eigen::VectorXd::Zero(nu);
417  dx_[t] = Eigen::VectorXd::Zero(ndx);
418 
419  FuTVxx_p_[t] = MatrixXdRowMajor::Zero(nu, ndx);
420  Quu_llt_[t] = Eigen::LLT<Eigen::MatrixXd>(nu);
421  Quuk_[t] = Eigen::VectorXd(nu);
422  }
423  Vxx_.back() = Eigen::MatrixXd::Zero(ndx, ndx);
424  Vxx_tmp_ = Eigen::MatrixXd::Zero(ndx, ndx);
425  Vx_.back() = Eigen::VectorXd::Zero(ndx);
426  xs_try_.back() = problem_->get_terminalModel()->get_state()->zero();
427 
428  FxTVxx_p_ = MatrixXdRowMajor::Zero(ndx, ndx);
429  fTVxx_p_ = Eigen::VectorXd::Zero(ndx);
430 }
431 
432 double SolverDDP::get_reg_incfactor() const { return reg_incfactor_; }
433 
434 double SolverDDP::get_reg_decfactor() const { return reg_decfactor_; }
435 
436 double SolverDDP::get_regfactor() const { return reg_incfactor_; }
437 
438 double SolverDDP::get_reg_min() const { return reg_min_; }
439 
440 double SolverDDP::get_regmin() const { return reg_min_; }
441 
442 double SolverDDP::get_reg_max() const { return reg_max_; }
443 
444 double SolverDDP::get_regmax() const { return reg_max_; }
445 
446 const std::vector<double>& SolverDDP::get_alphas() const { return alphas_; }
447 
448 double SolverDDP::get_th_stepdec() const { return th_stepdec_; }
449 
450 double SolverDDP::get_th_stepinc() const { return th_stepinc_; }
451 
452 double SolverDDP::get_th_grad() const { return th_grad_; }
453 
454 const std::vector<Eigen::MatrixXd>& SolverDDP::get_Vxx() const { return Vxx_; }
455 
456 const std::vector<Eigen::VectorXd>& SolverDDP::get_Vx() const { return Vx_; }
457 
458 const std::vector<Eigen::MatrixXd>& SolverDDP::get_Qxx() const { return Qxx_; }
459 
460 const std::vector<Eigen::MatrixXd>& SolverDDP::get_Qxu() const { return Qxu_; }
461 
462 const std::vector<Eigen::MatrixXd>& SolverDDP::get_Quu() const { return Quu_; }
463 
464 const std::vector<Eigen::VectorXd>& SolverDDP::get_Qx() const { return Qx_; }
465 
466 const std::vector<Eigen::VectorXd>& SolverDDP::get_Qu() const { return Qu_; }
467 
468 const std::vector<typename MathBaseTpl<double>::MatrixXsRowMajor>& SolverDDP::get_K() const { return K_; }
469 
470 const std::vector<Eigen::VectorXd>& SolverDDP::get_k() const { return k_; }
471 
472 void SolverDDP::set_reg_incfactor(const double regfactor) {
473  if (regfactor <= 1.) {
474  throw_pretty("Invalid argument: "
475  << "reg_incfactor value has to be higher than 1.");
476  }
477  reg_incfactor_ = regfactor;
478 }
479 
480 void SolverDDP::set_reg_decfactor(const double regfactor) {
481  if (regfactor <= 1.) {
482  throw_pretty("Invalid argument: "
483  << "reg_decfactor value has to be higher than 1.");
484  }
485  reg_decfactor_ = regfactor;
486 }
487 
488 void SolverDDP::set_regfactor(const double regfactor) {
489  if (regfactor <= 1.) {
490  throw_pretty("Invalid argument: "
491  << "regfactor value has to be higher than 1.");
492  }
493  set_reg_incfactor(regfactor);
494  set_reg_decfactor(regfactor);
495 }
496 
497 void SolverDDP::set_reg_min(const double regmin) {
498  if (0. > regmin) {
499  throw_pretty("Invalid argument: "
500  << "regmin value has to be positive.");
501  }
502  reg_min_ = regmin;
503 }
504 
505 void SolverDDP::set_regmin(const double regmin) {
506  if (0. > regmin) {
507  throw_pretty("Invalid argument: "
508  << "regmin value has to be positive.");
509  }
510  reg_min_ = regmin;
511 }
512 
513 void SolverDDP::set_reg_max(const double regmax) {
514  if (0. > regmax) {
515  throw_pretty("Invalid argument: "
516  << "regmax value has to be positive.");
517  }
518  reg_max_ = regmax;
519 }
520 
521 void SolverDDP::set_regmax(const double regmax) {
522  if (0. > regmax) {
523  throw_pretty("Invalid argument: "
524  << "regmax value has to be positive.");
525  }
526  reg_max_ = regmax;
527 }
528 
529 void SolverDDP::set_alphas(const std::vector<double>& alphas) {
530  double prev_alpha = alphas[0];
531  if (prev_alpha != 1.) {
532  std::cerr << "Warning: alpha[0] should be 1" << std::endl;
533  }
534  for (std::size_t i = 1; i < alphas.size(); ++i) {
535  double alpha = alphas[i];
536  if (0. >= alpha) {
537  throw_pretty("Invalid argument: "
538  << "alpha values have to be positive.");
539  }
540  if (alpha >= prev_alpha) {
541  throw_pretty("Invalid argument: "
542  << "alpha values have to be monotonically decreasing.");
543  }
544  prev_alpha = alpha;
545  }
546  alphas_ = alphas;
547 }
548 
549 void SolverDDP::set_th_stepdec(const double th_stepdec) {
550  if (0. >= th_stepdec || th_stepdec > 1.) {
551  throw_pretty("Invalid argument: "
552  << "th_stepdec value should be between 0 and 1.");
553  }
554  th_stepdec_ = th_stepdec;
555 }
556 
557 void SolverDDP::set_th_stepinc(const double th_stepinc) {
558  if (0. >= th_stepinc || th_stepinc > 1.) {
559  throw_pretty("Invalid argument: "
560  << "th_stepinc value should be between 0 and 1.");
561  }
562  th_stepinc_ = th_stepinc;
563 }
564 
565 void SolverDDP::set_th_grad(const double th_grad) {
566  if (0. > th_grad) {
567  throw_pretty("Invalid argument: "
568  << "th_grad value has to be positive.");
569  }
570  th_grad_ = th_grad;
571 }
572 
573 } // namespace crocoddyl
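A minimal usage sketch (not part of ddp.cpp): it assumes a boost::shared_ptr<crocoddyl::ShootingProblem> built elsewhere from action models; the function name run_ddp is illustrative, and setCallbacks(), get_xs(), get_us() and CallbackVerbose come from the solver base API and crocoddyl/core/utils/callbacks.hpp, not from this file.

#include <vector>
#include <boost/make_shared.hpp>

#include "crocoddyl/core/solvers/ddp.hpp"
#include "crocoddyl/core/utils/callbacks.hpp"

void run_ddp(const boost::shared_ptr<crocoddyl::ShootingProblem>& problem) {
  crocoddyl::SolverDDP solver(problem);

  // Optional: print a per-iteration summary.
  std::vector<boost::shared_ptr<crocoddyl::CallbackAbstract> > callbacks;
  callbacks.push_back(boost::make_shared<crocoddyl::CallbackVerbose>());
  solver.setCallbacks(callbacks);

  // Cold start with the defaults (maxiter = 100, regInit = 1e-9), see solve() above.
  const bool converged = solver.solve();

  // Optimal state and control trajectories (sizes T+1 and T).
  const std::vector<Eigen::VectorXd>& xs = solver.get_xs();
  const std::vector<Eigen::VectorXd>& us = solver.get_us();
  (void)converged;
  (void)xs;
  (void)us;
}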
crocoddyl::SolverDDP::th_grad_
double th_grad_
Tolerance of the expected gradient used for testing the step.
Definition: ddp.hpp:327
crocoddyl::SolverDDP::computeGains
virtual void computeGains(const std::size_t t)
Compute the feedforward and feedback terms using a Cholesky decomposition.
Definition: ddp.cpp:337
crocoddyl::SolverDDP::fTVxx_p_
Eigen::VectorXd fTVxx_p_
Store the value of f^T Vxx.
Definition: ddp.hpp:323
crocoddyl::SolverAbstract::th_acceptstep_
double th_acceptstep_
Threshold used for accepting step.
Definition: solver-base.hpp:326
crocoddyl::SolverDDP::set_th_stepdec
void set_th_stepdec(const double th_step)
Modify the step-length threshold used to decrease regularization.
Definition: ddp.cpp:549
crocoddyl::SolverDDP::set_reg_incfactor
void set_reg_incfactor(const double reg_factor)
Modify the regularization factor used to increase the damping value.
Definition: ddp.cpp:472
crocoddyl::SolverDDP::get_Qu
const std::vector< Eigen::VectorXd > & get_Qu() const
Return the Jacobian of the Hamiltonian function Qu.
Definition: ddp.cpp:466
crocoddyl::SolverDDP::th_stepinc_
double th_stepinc_
Step-length threshold used to increase regularization.
Definition: ddp.hpp:329
crocoddyl::SolverDDP::Vxx_tmp_
Eigen::MatrixXd Vxx_tmp_
Temporary variable for ensuring symmetry of Vxx.
Definition: ddp.hpp:309
crocoddyl::SolverDDP::set_reg_decfactor
void set_reg_decfactor(const double reg_factor)
Modify the regularization factor used to decrease the damping value.
Definition: ddp.cpp:480
crocoddyl::SolverDDP::set_alphas
void set_alphas(const std::vector< double > &alphas)
Modify the set of step lengths used by the line-search procedure.
Definition: ddp.cpp:529
crocoddyl::CallbackAbstract
Abstract class for solver callbacks.
Definition: solver-base.hpp:342
crocoddyl::SolverDDP::get_reg_incfactor
double get_reg_incfactor() const
Return the regularization factor used to increase the damping value.
Definition: ddp.cpp:432
crocoddyl::SolverAbstract
Abstract class for optimal control solvers.
Definition: solver-base.hpp:51
crocoddyl::SolverDDP::get_Vxx
const std::vector< Eigen::MatrixXd > & get_Vxx() const
Return the Hessian of the Value function Vxx.
Definition: ddp.cpp:454
crocoddyl::SolverAbstract::computeDynamicFeasibility
double computeDynamicFeasibility()
Compute the dynamic feasibility for the current guess (xs, us).
Definition: solver-base.cpp:66
crocoddyl::SolverDDP::forwardPass
virtual void forwardPass(const double stepLength)
Run the forward pass or rollout.
Definition: ddp.cpp:289
crocoddyl::SolverDDP::calcDiff
virtual double calcDiff()
Update the Jacobian, Hessian and feasibility of the optimal control problem.
Definition: ddp.cpp:194
crocoddyl::SolverDDP::alphas_
std::vector< double > alphas_
Set of step lengths used by the line-search procedure.
Definition: ddp.hpp:326
crocoddyl::SolverDDP::FxTVxx_p_
MatrixXdRowMajor FxTVxx_p_
Store the value of Fx^T Vxx.
Definition: ddp.hpp:320
crocoddyl::SolverDDP::solve
virtual bool solve(const std::vector< Eigen::VectorXd > &init_xs=DEFAULT_VECTOR, const std::vector< Eigen::VectorXd > &init_us=DEFAULT_VECTOR, const std::size_t maxiter=100, const bool is_feasible=false, const double regInit=1e-9)
Compute the optimal trajectory xs, us as lists of T+1 and T terms.
Definition: ddp.cpp:42
crocoddyl::SolverAbstract::is_feasible_
bool is_feasible_
Label that indicates if the iteration is feasible.
Definition: solver-base.hpp:316
crocoddyl::SolverDDP::get_th_stepinc
double get_th_stepinc() const
Return the step-length threshold used to increase regularization.
Definition: ddp.cpp:450
crocoddyl::SolverAbstract::d_
Eigen::Vector2d d_
LQ approximation of the expected improvement.
Definition: solver-base.hpp:320
crocoddyl::SolverDDP::reg_decfactor_
double reg_decfactor_
Regularization factor used to decrease the damping value.
Definition: ddp.hpp:298
crocoddyl::SolverDDP::get_reg_decfactor
double get_reg_decfactor() const
Return the regularization factor used to decrease the damping value.
Definition: ddp.cpp:434
crocoddyl::SolverDDP::dx_
std::vector< Eigen::VectorXd > dx_
State error during the roll-out/forward-pass (size T)
Definition: ddp.hpp:305
crocoddyl::SolverDDP::Quuk_
std::vector< Eigen::VectorXd > Quuk_
Store the values of Quu*k.
Definition: ddp.hpp:325
crocoddyl::SolverAbstract::resizeData
virtual void resizeData()
Resizing the solver data.
Definition: solver-base.cpp:56
crocoddyl::SolverDDP::tryStep
virtual double tryStep(const double steplength=1)
Try a predefined step length and compute its cost improvement dV.
Definition: ddp.cpp:137
crocoddyl::SolverAbstract::iter_
std::size_t iter_
Number of iteration performed by the solver.
Definition: solver-base.hpp:328
crocoddyl::SolverAbstract::ureg_
double ureg_
Current control regularization values.
Definition: solver-base.hpp:322
crocoddyl::SolverAbstract::stop_
double stop_
Value computed by stoppingCriteria()
Definition: solver-base.hpp:319
crocoddyl::SolverDDP::Qu_
std::vector< Eigen::VectorXd > Qu_
Gradient of the Hamiltonian Qu.
Definition: ddp.hpp:315
crocoddyl::SolverDDP::Qxu_
std::vector< Eigen::MatrixXd > Qxu_
Hessian of the Hamiltonian Qxu.
Definition: ddp.hpp:312
crocoddyl::SolverDDP::Qx_
std::vector< Eigen::VectorXd > Qx_
Gradient of the Hamiltonian Qx.
Definition: ddp.hpp:314
crocoddyl::SolverDDP::Quu_
std::vector< Eigen::MatrixXd > Quu_
Hessian of the Hamiltonian Quu.
Definition: ddp.hpp:313
crocoddyl::SolverAbstract::steplength_
double steplength_
Current applied step-length.
Definition: solver-base.hpp:323
crocoddyl::SolverDDP::cost_try_
double cost_try_
Total cost computed by line-search procedure.
Definition: ddp.hpp:302
crocoddyl::SolverAbstract::setCandidate
void setCandidate(const std::vector< Eigen::VectorXd > &xs_warm=DEFAULT_VECTOR, const std::vector< Eigen::VectorXd > &us_warm=DEFAULT_VECTOR, const bool is_feasible=false)
Set the solver candidate trajectories (xs, us).
Definition: solver-base.cpp:103
crocoddyl::SolverAbstract::th_stop_
double th_stop_
Tolerance for stopping the algorithm.
Definition: solver-base.hpp:327
crocoddyl::SolverDDP::backwardPass
virtual void backwardPass()
Run the backward pass (Riccati sweep)
Definition: ddp.cpp:206
crocoddyl::SolverDDP::reg_max_
double reg_max_
Maximum allowed regularization value.
Definition: ddp.hpp:300
crocoddyl::SolverDDP::computeDirection
virtual void computeDirection(const bool recalc=true)
Compute the search direction for the current guess (xs, us).
Definition: ddp.cpp:128
crocoddyl::SolverDDP::set_th_stepinc
void set_th_stepinc(const double th_step)
Modify the step-length threshold used to increase regularization.
Definition: ddp.cpp:557
crocoddyl::SolverDDP::us_try_
std::vector< Eigen::VectorXd > us_try_
Control trajectory computed by line-search procedure.
Definition: ddp.hpp:304
crocoddyl::SolverDDP::expectedImprovement
virtual const Eigen::Vector2d & expectedImprovement()
Return the expected improvement from a given current search direction.
Definition: ddp.cpp:158
crocoddyl::SolverAbstract::ffeas_
double ffeas_
Feasibility of the dynamic constraints.
Definition: solver-base.hpp:330
crocoddyl::SolverAbstract::problem_
boost::shared_ptr< ShootingProblem > problem_
optimal control problem
Definition: solver-base.hpp:311
crocoddyl::SolverDDP::SolverDDP
SolverDDP(boost::shared_ptr< ShootingProblem > problem)
Initialize the DDP solver.
Definition: ddp.cpp:16
crocoddyl::SolverAbstract::callbacks_
std::vector< boost::shared_ptr< CallbackAbstract > > callbacks_
Callback functions.
Definition: solver-base.hpp:315
crocoddyl::SolverDDP::get_reg_max
double get_reg_max() const
Return the maximum regularization value.
Definition: ddp.cpp:442
crocoddyl::SolverDDP::set_reg_max
void set_reg_max(const double regmax)
Modify the maximum regularization value.
Definition: ddp.cpp:513
crocoddyl::SolverDDP::K_
std::vector< MatrixXdRowMajor > K_
Feedback gains K.
Definition: ddp.hpp:316
crocoddyl::SolverDDP::set_th_grad
void set_th_grad(const double th_grad)
Modify the tolerance of the expected gradient used for testing the step.
Definition: ddp.cpp:565
crocoddyl::SolverAbstract::xreg_
double xreg_
Current state regularization value.
Definition: solver-base.hpp:321
crocoddyl::SolverDDP::Quu_llt_
std::vector< Eigen::LLT< Eigen::MatrixXd > > Quu_llt_
Cholesky LLT solver.
Definition: ddp.hpp:324
crocoddyl::SolverAbstract::dVexp_
double dVexp_
Expected cost reduction.
Definition: solver-base.hpp:325
crocoddyl::SolverDDP::get_Qx
const std::vector< Eigen::VectorXd > & get_Qx() const
Return the Jacobian of the Hamiltonian function Qx.
Definition: ddp.cpp:464
crocoddyl::SolverDDP::get_th_stepdec
double get_th_stepdec() const
Return the step-length threshold used to decrease regularization.
Definition: ddp.cpp:448
crocoddyl::SolverAbstract::us_
std::vector< Eigen::VectorXd > us_
Control trajectory.
Definition: solver-base.hpp:313
crocoddyl::SolverAbstract::fs_
std::vector< Eigen::VectorXd > fs_
Gaps/defects between shooting nodes.
Definition: solver-base.hpp:314
crocoddyl::SolverAbstract::cost_
double cost_
Total cost.
Definition: solver-base.hpp:318
crocoddyl::SolverDDP::resizeData
virtual void resizeData()
Resizing the solver data.
Definition: ddp.cpp:172
crocoddyl::SolverAbstract::xs_
std::vector< Eigen::VectorXd > xs_
State trajectory.
Definition: solver-base.hpp:312
crocoddyl::SolverDDP::get_alphas
const std::vector< double > & get_alphas() const
Return the set of step lengths used by the line-search procedure.
Definition: ddp.cpp:446
crocoddyl::SolverDDP::reg_min_
double reg_min_
Minimum allowed regularization value.
Definition: ddp.hpp:299
crocoddyl::SolverDDP::xs_try_
std::vector< Eigen::VectorXd > xs_try_
State trajectory computed by line-search procedure.
Definition: ddp.hpp:303
crocoddyl::SolverDDP::allocateData
virtual void allocateData()
Allocate all the internal data needed for the solver.
Definition: ddp.cpp:376
crocoddyl::SolverDDP::FuTVxx_p_
std::vector< MatrixXdRowMajor > FuTVxx_p_
Store the values of Fu^T Vxx per each running node.
Definition: ddp.hpp:322
crocoddyl::SolverDDP::Vx_
std::vector< Eigen::VectorXd > Vx_
Gradient of the Value function Vx.
Definition: ddp.hpp:310
crocoddyl::SolverDDP::increaseRegularization
void increaseRegularization()
Increase the state and control regularization values by the reg_incfactor_ factor.
Definition: ddp.cpp:360
crocoddyl::SolverDDP::get_k
const std::vector< Eigen::VectorXd > & get_k() const
Return the feedforward gains k.
Definition: ddp.cpp:470
crocoddyl::SolverDDP::get_Vx
const std::vector< Eigen::VectorXd > & get_Vx() const
Return the gradient of the Value function Vx.
Definition: ddp.cpp:456
crocoddyl::SolverAbstract::dV_
double dV_
Cost reduction obtained by tryStep()
Definition: solver-base.hpp:324
crocoddyl::SolverDDP::get_Quu
const std::vector< Eigen::MatrixXd > & get_Quu() const
Return the Hessian of the Hamiltonian function Quu.
Definition: ddp.cpp:462
crocoddyl::SolverDDP::decreaseRegularization
void decreaseRegularization()
Decrease the state and control regularization values by the reg_decfactor_ factor.
Definition: ddp.cpp:368
crocoddyl::SolverDDP::get_th_grad
double get_th_grad() const
Return the tolerance of the expected gradient used for testing the step.
Definition: ddp.cpp:452
crocoddyl::SolverDDP::get_Qxu
const std::vector< Eigen::MatrixXd > & get_Qxu() const
Return the Hessian of the Hamiltonian function Qxu.
Definition: ddp.cpp:460
crocoddyl::SolverDDP::Qxx_
std::vector< Eigen::MatrixXd > Qxx_
Hessian of the Hamiltonian Qxx.
Definition: ddp.hpp:311
crocoddyl::SolverDDP::th_stepdec_
double th_stepdec_
Step-length threshold used to decrease regularization.
Definition: ddp.hpp:328
crocoddyl::SolverDDP::get_Qxx
const std::vector< Eigen::MatrixXd > & get_Qxx() const
Return the Hessian of the Hamiltonian function Qxx.
Definition: ddp.cpp:458
crocoddyl::SolverDDP::Vxx_
std::vector< Eigen::MatrixXd > Vxx_
Hessian of the Value function Vxx.
Definition: ddp.hpp:308
crocoddyl::SolverDDP::stoppingCriteria
virtual double stoppingCriteria()
Return a positive value that quantifies the algorithm termination.
Definition: ddp.cpp:144
crocoddyl::SolverDDP::get_K
const std::vector< MatrixXdRowMajor > & get_K() const
Return the feedback gains K.
Definition: ddp.cpp:468
crocoddyl::SolverAbstract::was_feasible_
bool was_feasible_
Label that indicates if the previous iterate was feasible.
Definition: solver-base.hpp:317
crocoddyl::SolverDDP::reg_incfactor_
double reg_incfactor_
Regularization factor used to increase the damping value.
Definition: ddp.hpp:297
crocoddyl::SolverDDP::k_
std::vector< Eigen::VectorXd > k_
Feed-forward terms k.
Definition: ddp.hpp:317