hpp-constraints  4.9.1
Definition of basic geometric constraints for motion planning
by-substitution.hh
// Copyright (c) 2017, Joseph Mirabel
// Authors: Joseph Mirabel (joseph.mirabel@laas.fr)
//
// This file is part of hpp-constraints.
// hpp-constraints is free software: you can redistribute it
// and/or modify it under the terms of the GNU Lesser General Public
// License as published by the Free Software Foundation, either version
// 3 of the License, or (at your option) any later version.
//
// hpp-constraints is distributed in the hope that it will be
// useful, but WITHOUT ANY WARRANTY; without even the implied warranty
// of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
// Lesser General Public License for more details. You should have
// received a copy of the GNU Lesser General Public License along with
// hpp-constraints. If not, see <http://www.gnu.org/licenses/>.

#ifndef HPP_CONSTRAINTS_SOLVER_IMPL_BY_SUBSTITUTION_HH
#define HPP_CONSTRAINTS_SOLVER_IMPL_BY_SUBSTITUTION_HH

namespace hpp {
  namespace constraints {
    namespace solver {
      typedef std::numeric_limits<value_type> numeric_limits;
      typedef Eigen::NumTraits<value_type> NumTraits;

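      // impl_solve resolves the explicit part of the constraint system first
      // (explicit_.solve), then performs iterative descent steps on the
      // remaining implicit part: a step dq_ is computed from the constraint
      // Jacobian and applied through the LineSearchType functor.
      // When _optimize is true and the last level of constraints is optional,
      // the iterations continue after the constraints are satisfied in order
      // to decrease the squared error of that last, optional level.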
      template <typename LineSearchType>
      inline HierarchicalIterative::Status BySubstitution::impl_solve (
          vectorOut_t arg,
          bool _optimize,
          LineSearchType lineSearch) const
      {
        bool optimize = _optimize && lastIsOptional_;
        assert (!arg.hasNaN());

        explicit_.solve(arg);

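        // errorDecreased acts as a counter: it is decremented at every
        // iteration and reset to 3 whenever the squared error decreases, so
        // that (when not optimizing) the loop below gives up after three
        // iterations without improvement.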
        size_type errorDecreased = 3, iter = 0;
        value_type previousSquaredNorm;
        static const value_type dqMinSquaredNorm = NumTraits::dummy_precision();
        value_type initSquaredNorm = 0;

        // Variables for optimization only
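        // - previousCost:   lowest cost of the optional level reached so far
        // - scaling:        trust factor applied to the step; halved when a
        //                   step violates the constraints, doubled (while
        //                   below 0.5) when a step improves the cost
        // - onlyLineSearch: true when the previous step was cancelled and only
        //                   the scaling was reduced, so dq_ is not recomputed
        // - qopt:           best configuration found so far that satisfies the
        //                   constraints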
        value_type previousCost = numeric_limits::infinity();
        value_type scaling = 1.;
        bool onlyLineSearch = false;
        vector_t qopt;

        // Fill value and Jacobian
        computeValue<true> (arg);
        computeError();

        bool errorWasBelowThr = (squaredNorm_ < squaredErrorThreshold_);
        vector_t initArg;
        if (errorWasBelowThr) {
          initArg = arg;
          if (!optimize) iter = std::max (maxIterations_, size_type(2)) - 2;
          initSquaredNorm = squaredNorm_;
        }

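        // Note: .25 * squaredErrorThreshold_ amounts to requiring the error
        // norm to drop below half of the error threshold, which leaves some
        // margin with respect to the success test above.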
        bool errorIsAboveThr = (squaredNorm_ > .25 * squaredErrorThreshold_);
        if (errorIsAboveThr && reducedDimension_ == 0) return INFEASIBLE;
        if (optimize && !errorIsAboveThr) qopt = arg;

        Status status = SUCCESS;
        while (optimize || (errorIsAboveThr && errorDecreased)) {
          // 1. Maximum iterations
          if (iter >= maxIterations_) {
            status = MAX_ITERATION_REACHED;
            break;
          }
          status = SUCCESS;

          // 2. Compute step
          // onlyLineSearch is true when we only reduced the scaling.
          if (!onlyLineSearch) {
            previousSquaredNorm = squaredNorm_;
            // Update the jacobian using the jacobian of the explicit system.
            updateJacobian(arg);
            computeSaturation(arg);
            // Compute the descent direction dq_ from the updated Jacobian.
            computeDescentDirection ();
          }
          // Apply scaling to avoid too large steps.
          if (optimize) dq_ *= scaling;
          if (dq_.squaredNorm () < dqMinSquaredNorm) {
            // We assume that the algorithm reached a local minimum.
            status = INFEASIBLE;
            break;
          }
          // 3. Apply the line search algorithm to the computed step
          lineSearch (*this, arg, dq_);
          explicit_.solve(arg);

          // 4. Evaluate the error at the new point.
          computeValue<true> (arg);
          computeError ();

          --errorDecreased;
          if (squaredNorm_ < previousSquaredNorm)
            errorDecreased = 3;
          else
            status = ERROR_INCREASED;

          errorIsAboveThr = (squaredNorm_ > .25 * squaredErrorThreshold_);
          // 5. In case of optimization,
          // - if the constraints are satisfied and the cost decreased, increase
          //   the scaling (amount of confidence in the linear approximation),
          // - if the constraints are not satisfied, decrease the scaling and
          //   cancel this step.
          if (optimize) {
            if (!errorIsAboveThr) {
              value_type cost = datas_.back().error.squaredNorm();
              if (cost < previousCost) {
                qopt = arg;
                previousCost = cost;
                if (scaling < 0.5) scaling *= 2;
              }
              onlyLineSearch = false;
            } else {
              dq_ /= scaling;
              scaling *= 0.5;
              if (qopt.size() > 0) arg = qopt;
              onlyLineSearch = true;
            }
          }

          ++iter;
        }

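        // If the error was already below the threshold on entry and the
        // iterations did not improve it, restore the initial argument and
        // report success anyway.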
        if (!optimize && errorWasBelowThr) {
          if (squaredNorm_ > initSquaredNorm) {
            arg = initArg;
          }
          return SUCCESS;
        }
        // If optimizing, qopt is the visited configuration that satisfies the
        // constraints and has the lowest cost.
        if (optimize && qopt.size() > 0) arg = qopt;

        assert (!arg.hasNaN());
        return status;
      }
    } // namespace solver
  } // namespace constraints
} // namespace hpp

#endif // HPP_CONSTRAINTS_SOLVER_IMPL_BY_SUBSTITUTION_HH
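
The LineSearchType template parameter only needs to be a functor compatible with the call lineSearch (*this, arg, dq_) of step 3 above. Below is a minimal, illustrative sketch of a constant-step strategy, not part of the library: it assumes the solver exposes HierarchicalIterative::integrate (from, velocity, result) and the usual hpp::constraints typedefs (vector_t, vectorOut_t); the functor name is made up for this example. The strategies provided in the lineSearch namespace follow the same calling convention.

// Illustrative only: a minimal constant-step line search usable as
// LineSearchType. It applies the full step darg on the configuration
// manifold through the solver's integrate method.
namespace hpp {
  namespace constraints {
    namespace solver {
      struct ConstantStepLineSearch
      {
        template <typename SolverType>
        bool operator() (const SolverType& solver, vectorOut_t arg,
                         vectorOut_t darg) const
        {
          // result <- integrate (arg, darg): add the step in the tangent
          // space and map it back onto the configuration manifold.
          vector_t result (arg.size());
          solver.integrate (arg, darg, result);
          arg = result;
          // The return value is ignored by impl_solve.
          return true;
        }
      };
    } // namespace solver
  } // namespace constraints
} // namespace hpp

Assuming the usual solve (arg, lineSearch) entry point, such a functor could be passed directly as the line-search argument; backtracking-style strategies differ only in that they shrink the step until the error decreases.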