ipopt.hpp
/***********************************************************************************
Copyright (c) 2017, Diego Pardo. All rights reserved.

Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
 * Redistributions of source code must retain the above copyright notice,
   this list of conditions and the following disclaimer.
 * Redistributions in binary form must reproduce the above copyright notice,
   this list of conditions and the following disclaimer in the documentation
   and/or other materials provided with the distribution.
 * Neither the name of ETH ZURICH nor the names of its contributors may be used
   to endorse or promote products derived from this software without specific
   prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY
EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
SHALL ETH ZURICH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
***************************************************************************************/

#ifndef DT_IPOPT_INTERFACE_HPP_
#define DT_IPOPT_INTERFACE_HPP_

#include <cassert>
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <iostream>
#include <memory>
#include <stdexcept>
#include <string>
#include <vector>

#include <Eigen/Dense>

#include <IpIpoptApplication.hpp>
#include <IpTNLP.hpp>

using namespace Ipopt;

namespace DirectTrajectoryOptimization {

// forward declaration
template<typename DIMENSIONS>
class DTOMethodInterface;

/**
 * Class interfacing an Ipopt NLP with a DirectTrajectoryOptimization problem.
 */
template<typename DIMENSIONS>
class DTOIpoptInterface : public TNLP, public DTOSolverInterface<DIMENSIONS> {

public:

    EIGEN_MAKE_ALIGNED_OPERATOR_NEW

    typedef typename DIMENSIONS::state_vector_array_t state_vector_array_t;
    typedef typename DIMENSIONS::control_vector_array_t control_vector_array_t;

    /**
     * Create a new Ipopt interface instance.
     */
    DTOIpoptInterface(std::shared_ptr<DTOMethodInterface<DIMENSIONS>> method,
            bool ipopt_verbose) :
            _method(method) {
        this->_verbose = ipopt_verbose;
        _nlpSolver = new Ipopt::IpoptApplication();
    }

    virtual ~DTOIpoptInterface() {
        /* Workaround: the std::shared_ptr holding this object must destroy everything,
         * including the reference count kept by the Ipopt SmartPtr. */
        Ipopt::Referencer* t = NULL;
        this->ReleaseRef(t);
        delete t;
    }

    virtual void InitializeSolver(const std::string & problemName,
            const std::string & outputFile,
            const std::string & paramFile /*= "" */,
            const bool & computeJacobian) override;

    virtual void Solve(bool warminit = false) override;

    virtual void GetDTOSolution(state_vector_array_t & yTraj,
            control_vector_array_t & uTraj, Eigen::VectorXd & hTraj) override;

    virtual void GetDTOSolution(state_vector_array_t & y_trajectory,
            control_vector_array_t & u_trajectory,
            Eigen::VectorXd & h_trajectory,
            std::vector<int> & phaseId) override;

    virtual void GetFVector(Eigen::VectorXd & fVec) override;

//    void WarmInitialization(
//            const state_vector_array_t & y_sol,
//            const control_vector_array_t & u_sol,
//            const Eigen::VectorXd & h_sol,
//            const Eigen::VectorXi & x_state,
//            const Eigen::VectorXd & x_mul,
//            const Eigen::VectorXi & f_state,
//            const Eigen::VectorXd & f_mul) override;

    virtual void InitializeDecisionVariablesFromTrajectory(const state_vector_array_t & y_sol,
            const control_vector_array_t & u_sol, const Eigen::VectorXd & h_sol) override;

    void SetupSolverParameters() override;

private:

    // private helper functions
    void SetupIpoptVariables();
    void SetupNLP();

    void SetSparsityVariables();

    void readParametersFromFile(const std::string & p_file);

    // IPOPT functions
    virtual bool get_nlp_info(Index& n, Index& m, Index& nnz_jac_g,
            Index& nnz_h_lag, IndexStyleEnum& index_style) override;

    virtual bool get_bounds_info(Index n, Number* x_l, Number* x_u, Index m,
            Number* g_l, Number* g_u) override;

    virtual bool get_starting_point(Index n, bool init_x, Number* x,
            bool init_z, Number* z_L, Number* z_U, Index m, bool init_lambda,
            Number* lambda) override;

    virtual bool eval_f(Index n, const Number* x, bool new_x,
            Number& obj_value) override;

    virtual bool eval_grad_f(Index n, const Number* x, bool new_x,
            Number* grad_f) override;

    virtual bool eval_g(Index n, const Number* x, bool new_x, Index m,
            Number* g) override;

    virtual bool eval_jac_g(Index n, const Number* x, bool new_x, Index m,
            Index nele_jac, Index* iRow, Index* jCol, Number* values) override;

    virtual void finalize_solution(SolverReturn status, Index n,
            const Number* x, const Number* z_L, const Number* z_U, Index m,
            const Number* g, const Number* lambda, Number obj_value,
            const IpoptData* ip_data, IpoptCalculatedQuantities* ip_cq) override;

    // private member variables
    bool use_computeJacobian = false;

    Ipopt::SmartPtr<IpoptApplication> _nlpSolver;
    std::shared_ptr<DTOMethodInterface<DIMENSIONS>> _method;

    int _lenX = 0;              // size of the decision-variable vector
    int _lenConstraint = 0;     // size of the constraint vector

    int _nnzCostJac = 0;        // number of non-zero elements of the cost jacobian
    int _nnzConstraintJac = 0;  // number of non-zero elements of the constraint jacobian

    Eigen::VectorXd _valX;              // current value of the decision variables
    double _valCost = 0.0;              // current value of the cost
    Eigen::VectorXd _valConstraint;     // current value of the constraints

    Eigen::VectorXd _valCostJac;        // current value of the cost jacobian
    Eigen::VectorXd _valConstraintJac;  // current value of the constraint jacobian
    Eigen::VectorXd _lamdaConstraints;  // constraint multipliers (lambda)

    Eigen::VectorXi _jCostJac;          // column indices of the cost jacobian
    Eigen::VectorXi _iConstraintJac;    // row indices of the constraint jacobian
    Eigen::VectorXi _jConstraintJac;    // column indices of the constraint jacobian

    // output parameters
    std::string ippt_name_of_problem = "";
    std::string ippt_name_of_output_file = "";
    int ippt_print_level = 5;
    int ippt_file_print_level = 5;
    std::string ippt_print_user_options = "no";
    std::string ippt_print_options_documentation = "no";
    std::string ippt_print_timing_statistics = "yes";

    // convergence parameters
    double ippt_tolerance = 1e-4;
    int ippt_max_iter = 10000;
    double ippt_max_cpu_time = 1e6;
    std::string ippt_mu_strategy = "adaptive";  // monotone, adaptive
    double ippt_dual_inf_tol = 1;
    double ippt_constr_viol_tol = 1e-6;
    double ippt_compl_inf_tol = 1e-6;
    double ippt_acceptable_tol = 1e-6;
    int ippt_acceptable_iter = 30;

    // NLP scaling parameters
    std::string ippt_nlp_scaling_method = "gradient-based";  // none, gradient-based, user-scaling, equilibration-based (only with MC19)
    double ippt_obj_scaling_factor = 1;
    double ippt_nlp_scaling_max_gradient = 100;  // scaling is applied if the maximum gradient exceeds this value
    double ippt_nlp_scaling_obj_target_gradient = 0;

    // linear solver parameters
    std::string ippt_linear_solver = "ma57";  // ma27/57/77/86/97, pardiso, wsmp, mumps, custom
    std::string ippt_fast_step_computation = "no";  // yes

    // derivative checker parameters
    std::string ippt_derivative_test = "none";  // none, first-order
    double ippt_derivative_test_tol = 1e-4;
    double point_perturbation_radius = 0.01;
    // double ippt_derivative_test_perturbation = std::sqrt(Eigen::NumTraits<double>::epsilon());

    // Hessian approximation parameters
    std::string ippt_hessian_approximation = "limited-memory";  // (exact) <- only if implemented in the method
    int ippt_limited_memory_max_history = 6;
    int ippt_limited_memory_max_skipping = 2;

    bool ippt_rethrowNonIpoptException = false;
};

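// ---------------------------------------------------------------------------
// Usage sketch (illustration only, not part of the interface): a rough outline
// of how this solver is typically driven, assuming `MyMethod` is some concrete
// DTOMethodInterface<Dims> implementation and `Dims` the dimension traits; the
// exact construction of the method object depends on the rest of the library.
//
//   std::shared_ptr<DTOMethodInterface<Dims>> method = std::make_shared<MyMethod>(/*...*/);
//   auto solver = std::make_shared<DTOIpoptInterface<Dims>>(method, /*ipopt_verbose=*/true);
//
//   solver->InitializeSolver("my_problem", "ipopt_output.txt", "", /*computeJacobian=*/false);
//   solver->Solve();
//
//   DTOIpoptInterface<Dims>::state_vector_array_t   y;
//   DTOIpoptInterface<Dims>::control_vector_array_t u;
//   Eigen::VectorXd h;
//   solver->GetDTOSolution(y, u, h);
// ---------------------------------------------------------------------------
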
template<typename DIMENSIONS>
void DTOIpoptInterface<DIMENSIONS>::InitializeSolver(
        const std::string & problemName,
        const std::string & outputFile,
        const std::string & paramFile /*= "" */,
        const bool & computeJacobian) {

    use_computeJacobian = computeJacobian;
    ippt_name_of_problem = problemName;
    ippt_name_of_output_file = outputFile;

    _method->Initialize();  // ensure that the method variables are ready

    SetupIpoptVariables();
    SetupNLP();

    if (paramFile != "") {
        readParametersFromFile(paramFile);
    }

    SetupSolverParameters();

    return;
}

template<typename DIMENSIONS>
void DTOIpoptInterface<DIMENSIONS>::SetupIpoptVariables() {

    _lenX = _method->myNLP.GetNumVars();
    _lenConstraint = _method->myNLP.GetNumF();
    _valConstraint.resize(_lenConstraint);
}

template<typename DIMENSIONS>
void DTOIpoptInterface<DIMENSIONS>::SetupNLP() {

    SetSparsityVariables();
    return;
}

template<class DIMENSIONS>
void DTOIpoptInterface<DIMENSIONS>::InitializeDecisionVariablesFromTrajectory(
        const state_vector_array_t & y_sol,
        const control_vector_array_t & u_sol, const Eigen::VectorXd & h_sol) {

    _method->SetDecisionVariablesFromStateControlIncrementsTrajectories(this->_valX, y_sol, u_sol, h_sol);
}

template<typename DIMENSIONS>
void DTOIpoptInterface<DIMENSIONS>::SetupSolverParameters() {

    // OUTPUT
    _nlpSolver->Options()->SetStringValue("output_file", ippt_name_of_output_file);
    _nlpSolver->Options()->SetIntegerValue("print_level", ippt_print_level);
    _nlpSolver->Options()->SetIntegerValue("file_print_level", ippt_file_print_level);
    _nlpSolver->Options()->SetStringValue("print_user_options", ippt_print_user_options);
    _nlpSolver->Options()->SetStringValue("print_options_documentation", ippt_print_options_documentation);
    _nlpSolver->Options()->SetStringValue("print_timing_statistics", ippt_print_timing_statistics);

    // CONVERGENCE
    _nlpSolver->Options()->SetNumericValue("tol", ippt_tolerance);
    _nlpSolver->Options()->SetIntegerValue("max_iter", ippt_max_iter);
    _nlpSolver->Options()->SetNumericValue("max_cpu_time", ippt_max_cpu_time);
    _nlpSolver->Options()->SetStringValue("mu_strategy", ippt_mu_strategy);
    _nlpSolver->Options()->SetNumericValue("dual_inf_tol", ippt_dual_inf_tol);
    _nlpSolver->Options()->SetNumericValue("constr_viol_tol", ippt_constr_viol_tol);
    _nlpSolver->Options()->SetNumericValue("compl_inf_tol", ippt_compl_inf_tol);
    _nlpSolver->Options()->SetNumericValue("acceptable_tol", ippt_acceptable_tol);
    _nlpSolver->Options()->SetIntegerValue("acceptable_iter", ippt_acceptable_iter);

    // NLP SCALING
    _nlpSolver->Options()->SetStringValue("nlp_scaling_method", ippt_nlp_scaling_method);
    _nlpSolver->Options()->SetNumericValue("obj_scaling_factor", ippt_obj_scaling_factor);
    _nlpSolver->Options()->SetNumericValue("nlp_scaling_max_gradient", ippt_nlp_scaling_max_gradient);
    _nlpSolver->Options()->SetNumericValue("nlp_scaling_obj_target_gradient", ippt_nlp_scaling_obj_target_gradient);

    // LINEAR SOLVER
    _nlpSolver->Options()->SetStringValue("linear_solver", ippt_linear_solver);
    _nlpSolver->Options()->SetStringValue("fast_step_computation", ippt_fast_step_computation);

    // DERIVATIVE CHECKER
    _nlpSolver->Options()->SetStringValue("derivative_test", ippt_derivative_test);
    _nlpSolver->Options()->SetNumericValue("derivative_test_tol", ippt_derivative_test_tol);
    _nlpSolver->Options()->SetNumericValue("point_perturbation_radius", point_perturbation_radius);
    // _nlpSolver->Options()->SetNumericValue("derivative_test_perturbation", ippt_derivative_test_perturbation);

    // HESSIAN APPROXIMATION
    _nlpSolver->Options()->SetStringValue("hessian_approximation", ippt_hessian_approximation);
    _nlpSolver->Options()->SetIntegerValue("limited_memory_max_history", ippt_limited_memory_max_history);
    _nlpSolver->Options()->SetIntegerValue("limited_memory_max_skipping", ippt_limited_memory_max_skipping);

    _nlpSolver->RethrowNonIpoptException(ippt_rethrowNonIpoptException);

    if (use_computeJacobian) {
        // let Ipopt approximate the constraint jacobian by finite differences
        _nlpSolver->Options()->SetStringValue("jacobian_approximation", "finite-difference-values");
    }
}

template<typename DIMENSIONS>
void DTOIpoptInterface<DIMENSIONS>::readParametersFromFile(const std::string & paramFile) {

    // ToDo: disabled after the new base_method_interface
    // _app->Options->SetStringValue("option_file_name", paramFile);
}

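// A possible re-enabling of the function above (sketch only, assuming the file
// follows Ipopt's usual option-file format of one "<option> <value>" pair per line,
// as in ipopt.opt). The option has to be set on _nlpSolver before Initialize() is
// called in Solve():
//
//   template<typename DIMENSIONS>
//   void DTOIpoptInterface<DIMENSIONS>::readParametersFromFile(const std::string & paramFile) {
//       _nlpSolver->Options()->SetStringValue("option_file_name", paramFile);
//   }
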
template<typename DIMENSIONS>
void DTOIpoptInterface<DIMENSIONS>::SetSparsityVariables() {

    // Ipopt does not exploit sparsity in the cost gradient, so a dense gradient
    // of length _lenX is reported (TODO: inconsistent with the name nnz),
    // but we still need to map the indices from the base method.
    _nnzCostJac = _lenX;
    _jCostJac = _method->myNLP.GetjGvarCost();

    if (use_computeJacobian) {
        // dense constraint jacobian, enumerated column by column
        _nnzConstraintJac = _lenConstraint * _lenX;
        _iConstraintJac.resize(_nnzConstraintJac);
        _jConstraintJac.resize(_nnzConstraintJac);

        for (int i = 0; i < _nnzConstraintJac; i++) {
            _iConstraintJac[i] = i % _lenConstraint;
            _jConstraintJac[i] = i / _lenConstraint;
        }
    } else {
        _nnzConstraintJac = _method->myNLP.GetDimG();
        _iConstraintJac = _method->myNLP.GetiGfun();
        _jConstraintJac = _method->myNLP.GetjGvar();
    }

    _valX.resize(_lenX);
    _valCostJac.resize(_nnzCostJac);
    _valConstraintJac.resize(_nnzConstraintJac);

    // some assertions to make sure sizes are as we expect
    assert(_iConstraintJac.size() == _nnzConstraintJac);
    assert(_jConstraintJac.size() == _nnzConstraintJac);
    // assert(_jCostJac.size() == _nnzCostJac);  // not true for IPOPT, where the non-sparse version is used

    if (this->_verbose) {
        std::cout << "[VERBOSE] _iConstraintJac : " << _iConstraintJac.transpose() << std::endl;
        std::cout << "[VERBOSE] _jConstraintJac : " << _jConstraintJac.transpose() << std::endl;
        std::getchar();
    }

    return;
}

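// Illustration (not part of the code): for the dense case above with, say,
// _lenConstraint = 2 and _lenX = 3, the jacobian entries are enumerated column by
// column, i.e. entry k maps to (row, col) = (k % 2, k / 2):
//
//   k     : 0      1      2      3      4      5
//   (i,j) : (0,0)  (1,0)  (0,1)  (1,1)  (0,2)  (1,2)
//
// which matches Ipopt's requirement that eval_jac_g later fills `values` in the
// same order as the (iRow, jCol) pattern returned there.
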
template<typename DIMENSIONS>
void DTOIpoptInterface<DIMENSIONS>::Solve(bool warminit) {

    ApplicationReturnStatus status;

    // Initialize the Ipopt application (this processes the options set above)
    status = _nlpSolver->Initialize();

    if (status == Solve_Succeeded) {
        std::cout << std::endl << std::endl
                << "*** Initialized successfully -- starting NLP." << std::endl;
    } else {
        std::cout << std::endl << std::endl
                << "*** Error during initialization!" << std::endl;
        exit(EXIT_FAILURE);
    }

    // Solve the NLP
    status = _nlpSolver->OptimizeTNLP(this);

    if (status == Solve_Succeeded) {
        std::cout << std::endl << std::endl << "*** The problem solved!" << std::endl;
    } else {
        std::cout << std::endl << std::endl << "*** The problem FAILED!" << std::endl;
    }
}

template<typename DIMENSIONS>
void DTOIpoptInterface<DIMENSIONS>::GetDTOSolution(state_vector_array_t & yTraj,
        control_vector_array_t & uTraj, Eigen::VectorXd & hTraj) {

    Eigen::Map<Eigen::VectorXd> x_local(_valX.data(), _lenX);
    _method->getStateControlIncrementsTrajectoriesFromDecisionVariables(x_local, yTraj, uTraj, hTraj);

    return;
}

template<class DIMENSIONS>
void DTOIpoptInterface<DIMENSIONS>::GetDTOSolution(state_vector_array_t & y_trajectory,
        control_vector_array_t & u_trajectory, Eigen::VectorXd & h_trajectory,
        std::vector<int> & phaseId) {

    Eigen::Map<Eigen::VectorXd> x_local(_valX.data(), _lenX);

    _method->getStateControlIncrementsTrajectoriesFromDecisionVariables(x_local, y_trajectory, u_trajectory, h_trajectory);

    phaseId = _method->node_to_phase_map;
}

template<typename DIMENSIONS>
void DTOIpoptInterface<DIMENSIONS>::GetFVector(Eigen::VectorXd & fVec) {

    fVec.resize(1 + _valConstraint.size());

    fVec(0) = _valCost;
    fVec.segment(1, _valConstraint.size()) = _valConstraint;

    return;
}

// template<class DIMENSIONS>
// void DTOIpoptInterface<DIMENSIONS>::WarmInitialization(const state_vector_array_t & y_sol,
//         const control_vector_array_t & u_sol, const Eigen::VectorXd & h_sol,
//         const Eigen::VectorXi & p_x_state, const Eigen::VectorXd & p_x_mul,
//         const Eigen::VectorXi & p_f_state, const Eigen::VectorXd & p_f_mul) {
//
//     // for now we just support initialization of the constraint multipliers
//     this->_lamdaConstraints = p_f_mul;
//
//     _method->SetDecisionVariablesFromStateControlIncrementsTrajectories(this->_valX, y_sol, u_sol, h_sol);
// }

// IPOPT functions
template<typename DIMENSIONS>
bool DTOIpoptInterface<DIMENSIONS>::get_nlp_info(Index& n, Index& m,
        Index& nnz_jac_g, Index& nnz_h_lag, IndexStyleEnum& index_style) {

    n = _lenX;
    m = _lenConstraint;

    nnz_jac_g = _nnzConstraintJac;
    nnz_h_lag = 0;  // no exact Hessian is provided (limited-memory approximation is used)

    index_style = TNLP::C_STYLE;

    return true;
}

template<typename DIMENSIONS>
bool DTOIpoptInterface<DIMENSIONS>::get_bounds_info(Index n, Number* x_l,
        Number* x_u, Index m, Number* g_l, Number* g_u) {

    Number* pointer;

    pointer = _method->myNLP.GetXlb_p();
    memcpy(x_l, pointer, sizeof(Number) * n);

    pointer = _method->myNLP.GetXub_p();
    memcpy(x_u, pointer, sizeof(Number) * n);

    pointer = _method->myNLP.GetFlb_p();
    memcpy(g_l, pointer, sizeof(Number) * m);

    pointer = _method->myNLP.GetFub_p();
    memcpy(g_u, pointer, sizeof(Number) * m);

    return true;
}

template<typename DIMENSIONS>
bool DTOIpoptInterface<DIMENSIONS>::get_starting_point(Index n, bool init_x,
        Number* x, bool init_z, Number* z_L, Number* z_U, Index m,
        bool init_lambda, Number* lambda) {

    std::cout << "getting starting points" << std::endl;

    // if these checks fail, some options have been set that are not handled here
    assert(init_z == false);
    if (init_z == true) {
        throw std::runtime_error("init_z should be false");
    }

    Eigen::Map<Eigen::VectorXd> local_x(x, n);
    Eigen::Map<Eigen::VectorXd> local_lambdaF(lambda, m);

    if (init_x) {
        std::cout << "x init is true" << std::endl;
        local_x = this->_valX;
        std::cout << "x initialized" << std::endl;
    }
    if (init_lambda) {
        // TODO: is this ever requested by Ipopt with the current options?
        local_lambdaF = this->_lamdaConstraints;
    }

    this->_method->UpdateDecisionVariables(x);

//    std::cout << "Print X" << std::endl;
//    for (size_t i = 0; i < n; i++) {
//        std::cout << x[i] << std::endl;
//    }

    return true;
}

template<typename DIMENSIONS>
bool DTOIpoptInterface<DIMENSIONS>::eval_f(Index n, const Number* x, bool new_x,
        Number& obj_value) {

//    std::cout << "eval_f()" << std::endl;
//
//    std::cout << "Received decision variables" << std::endl;
//    std::cout << "Print X" << std::endl;
//    for (size_t i = 0; i < n; i++) {
//        std::cout << x[i] << std::endl;
//    }
//
//    std::cout << "y[0]" << std::endl;
//    std::cout << _method->_y_trajectory[0].transpose() << std::endl;
//    std::cout << "u[0]" << std::endl;
//    std::cout << _method->_u_trajectory[0].transpose() << std::endl;
//
//    std::cout << "y[1]" << std::endl;
//    std::cout << _method->_y_trajectory[1].transpose() << std::endl;
//    std::cout << "u[1]" << std::endl;
//    std::cout << _method->_u_trajectory[1].transpose() << std::endl;

    if (new_x) this->_method->UpdateDecisionVariables(x);

    this->_method->evalObjective(&obj_value);

    return true;
}

template<typename DIMENSIONS>
bool DTOIpoptInterface<DIMENSIONS>::eval_grad_f(Index n, const Number* x,
        bool new_x, Number* grad_f) {

//    std::cout << "eval_grad_f()" << std::endl;
    if (new_x) this->_method->UpdateDecisionVariables(x);

    if (use_computeJacobian)
        return true;

    Eigen::VectorXd local_sparse_cost_gradient(_jCostJac.size());
    this->_method->evalSparseJacobianCost(local_sparse_cost_gradient.data());

    // set all to zero before filling in the sparsity -> TODO: only set the zero elements
    for (int i = 0; i < n; i++)
        grad_f[i] = 0;

    for (int i = 0; i < _jCostJac.size(); i++)
        grad_f[_jCostJac[i]] = local_sparse_cost_gradient(i);

    return true;
}

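// Illustration (not part of the code): if _jCostJac = [0, 3, 4] and the method
// returns the sparse gradient [a, b, c], the scatter loop above produces the dense
// gradient grad_f = [a, 0, 0, b, c, 0, ..., 0] of length n, which is the layout
// Ipopt expects from eval_grad_f.
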
template<typename DIMENSIONS>
bool DTOIpoptInterface<DIMENSIONS>::eval_g(Index n, const Number* x, bool new_x,
        Index m, Number* g) {

//    std::cout << "eval_g()" << std::endl;
    if (new_x) this->_method->UpdateDecisionVariables(x);

    this->_method->evalConstraintFct(g, m);

    return true;
}

template<typename DIMENSIONS>
bool DTOIpoptInterface<DIMENSIONS>::eval_jac_g(Index n, const Number* x,
        bool new_x, Index m, Index nele_jac, Index* iRow, Index* jCol,
        Number* values) {

//    std::cout << "eval_jac_g()" << std::endl;

    assert(nele_jac == _jConstraintJac.size());

    if (values == NULL) {
        // first call: return only the sparsity structure
        std::cout << "Return sparsity" << std::endl;
        // note: memcpy is not usable here as written: sizeof(double) is the wrong
        // element size, and _iConstraintJac stores int while iRow is Ipopt::Index
        // std::memcpy(iRow, _iConstraintJac.data(), sizeof(double) * nele_jac);
        // std::memcpy(jCol, _jConstraintJac.data(), sizeof(double) * nele_jac);

        for (int i = 0; i < nele_jac; i++) {
            iRow[i] = _iConstraintJac(i);
            jCol[i] = _jConstraintJac(i);
        }

    } else if (!use_computeJacobian) {
        if (new_x) this->_method->UpdateDecisionVariables(x);
//        std::cout << "Computing Sparse Jacobian" << std::endl;
        this->_method->evalSparseJacobianConstraint(values);
//        Eigen::Map<Eigen::VectorXd> val_vec(values, this->_method->myNLP.lenG);
//        for (int i = 0; i < nele_jac; i++) {
//            std::cout << "(" << _iConstraintJac(i) << "," << _jConstraintJac(i) << ") " << val_vec[i] << std::endl;
//            std::getchar();
//        }
    }

    return true;
}

template<typename DIMENSIONS>
void DTOIpoptInterface<DIMENSIONS>::finalize_solution(SolverReturn status,
        Index n, const Number* x, const Number* z_L, const Number* z_U, Index m,
        const Number* g, const Number* lambda, Number obj_value,
        const IpoptData* ip_data, IpoptCalculatedQuantities* ip_cq) {

    this->_method->UpdateDecisionVariables(x);

    assert(n == _valX.size());
    assert(m == _valConstraint.size());

    memcpy(_valX.data(), x, n * sizeof(Number));
    _valCost = obj_value;
    memcpy(_valConstraint.data(), g, m * sizeof(Number));

    return;
}

} // namespace DirectTrajectoryOptimization

#endif /* DT_IPOPT_INTERFACE_HPP_ */