From 4b64cf486f5c999fd8167758cae27839f3b50848 Mon Sep 17 00:00:00 2001
From: Harpreet
Date: Sat, 3 Sep 2016 00:34:27 +0530
Subject: Structure updated and intqpipopt files added

---
 .../linux/include/coin/IpOrigIpoptNLP.hpp | 488 +++++++++++++++++++++
 1 file changed, 488 insertions(+)
 create mode 100644 newstructure/thirdparty/linux/include/coin/IpOrigIpoptNLP.hpp

diff --git a/newstructure/thirdparty/linux/include/coin/IpOrigIpoptNLP.hpp b/newstructure/thirdparty/linux/include/coin/IpOrigIpoptNLP.hpp
new file mode 100644
index 0000000..41b10fa
--- /dev/null
+++ b/newstructure/thirdparty/linux/include/coin/IpOrigIpoptNLP.hpp
@@ -0,0 +1,488 @@
+// Copyright (C) 2004, 2010 International Business Machines and others.
+// All Rights Reserved.
+// This code is published under the Eclipse Public License.
+//
+// $Id: IpOrigIpoptNLP.hpp 2594 2015-08-09 14:31:05Z stefan $
+//
+// Authors:  Carl Laird, Andreas Waechter     IBM    2004-08-13
+
+#ifndef __IPORIGIPOPTNLP_HPP__
+#define __IPORIGIPOPTNLP_HPP__
+
+#include "IpIpoptNLP.hpp"
+#include "IpException.hpp"
+#include "IpTimingStatistics.hpp"
+
+namespace Ipopt
+{
+
+  /** enumeration for the Hessian information type. */
+  enum HessianApproximationType {
+    EXACT=0,
+    LIMITED_MEMORY
+  };
+
+  /** enumeration for the Hessian approximation space. */
+  enum HessianApproximationSpace {
+    NONLINEAR_VARS=0,
+    ALL_VARS
+  };
+
+  /** This class maps the traditional NLP into
+   *  something that is more useful for Ipopt.
+   *  This class takes care of storing the
+   *  calculated model results, handles caching,
+   *  and (some day) takes care of addition of slacks.
+   */
+  class OrigIpoptNLP : public IpoptNLP
+  {
+  public:
+    /**@name Constructors/Destructors */
+    //@{
+    OrigIpoptNLP(const SmartPtr<const Journalist>& jnlst,
+                 const SmartPtr<NLP>& nlp,
+                 const SmartPtr<NLPScalingObject>& nlp_scaling);
+
+    /** Default destructor */
+    virtual ~OrigIpoptNLP();
+    //@}
+
+    /** Initialize - overloaded from IpoptNLP */
+    virtual bool Initialize(const Journalist& jnlst,
+                            const OptionsList& options,
+                            const std::string& prefix);
+
+    /** Initialize (create) structures for
+     *  the iteration data */
+    virtual bool InitializeStructures(SmartPtr<Vector>& x,
+                                      bool init_x,
+                                      SmartPtr<Vector>& y_c,
+                                      bool init_y_c,
+                                      SmartPtr<Vector>& y_d,
+                                      bool init_y_d,
+                                      SmartPtr<Vector>& z_L,
+                                      bool init_z_L,
+                                      SmartPtr<Vector>& z_U,
+                                      bool init_z_U,
+                                      SmartPtr<Vector>& v_L,
+                                      SmartPtr<Vector>& v_U
+                                     );
+
+    /** Method accessing the GetWarmStartIterate of the NLP */
+    virtual bool GetWarmStartIterate(IteratesVector& warm_start_iterate)
+    {
+      return nlp_->GetWarmStartIterate(warm_start_iterate);
+    }
+
+    /** Accessor methods for model data */
+    //@{
+    /** Objective value */
+    virtual Number f(const Vector& x);
+
+    /** Objective value (depending on mu) - incorrect version for
+     *  OrigIpoptNLP */
+    virtual Number f(const Vector& x, Number mu);
+
+    /** Gradient of the objective */
+    virtual SmartPtr<const Vector> grad_f(const Vector& x);
+
+    /** Gradient of the objective (depending on mu) - incorrect
+     *  version for OrigIpoptNLP */
+    virtual SmartPtr<const Vector> grad_f(const Vector& x, Number mu);
+
+    /** Equality constraint residual */
+    virtual SmartPtr<const Vector> c(const Vector& x);
+
+    /** Jacobian Matrix for equality constraints */
+    virtual SmartPtr<const Matrix> jac_c(const Vector& x);
+
+    /** Inequality constraint residual (reformulated
+     *  as equalities with slacks) */
+    virtual SmartPtr<const Vector> d(const Vector& x);
+
+    /** Jacobian Matrix for inequality constraints */
+    virtual SmartPtr<const Matrix> jac_d(const Vector& x);
+
+    /** Hessian of the Lagrangian */
+    virtual SmartPtr<const SymMatrix> h(const Vector& x,
+                                        Number obj_factor,
+                                        const Vector& yc,
+                                        const Vector& yd
+                                       );
+
+    /** Hessian of the Lagrangian (depending on mu) - incorrect
+     *  version for OrigIpoptNLP */
+    virtual SmartPtr<const SymMatrix> h(const Vector& x,
+                                        Number obj_factor,
+                                        const Vector& yc,
+                                        const Vector& yd,
+                                        Number mu);
+
+    /** Provides a Hessian matrix from the correct matrix space with
+     *  uninitialized values.  This can be used in LeastSquareMults to
+     *  obtain a "zero Hessian". */
+    virtual SmartPtr<const SymMatrix> uninitialized_h();
+
+    /** Lower bounds on x */
+    virtual SmartPtr<const Vector> x_L() const
+    {
+      return x_L_;
+    }
+
+    /** Permutation matrix (x_L_ -> x) */
+    virtual SmartPtr<const Matrix> Px_L() const
+    {
+      return Px_L_;
+    }
+
+    /** Upper bounds on x */
+    virtual SmartPtr<const Vector> x_U() const
+    {
+      return x_U_;
+    }
+
+    /** Permutation matrix (x_U_ -> x) */
+    virtual SmartPtr<const Matrix> Px_U() const
+    {
+      return Px_U_;
+    }
+
+    /** Lower bounds on d */
+    virtual SmartPtr<const Vector> d_L() const
+    {
+      return d_L_;
+    }
+
+    /** Permutation matrix (d_L_ -> d) */
+    virtual SmartPtr<const Matrix> Pd_L() const
+    {
+      return Pd_L_;
+    }
+
+    /** Upper bounds on d */
+    virtual SmartPtr<const Vector> d_U() const
+    {
+      return d_U_;
+    }
+
+    /** Permutation matrix (d_U_ -> d) */
+    virtual SmartPtr<const Matrix> Pd_U() const
+    {
+      return Pd_U_;
+    }
+
+    virtual SmartPtr<const SymMatrixSpace> HessianMatrixSpace() const
+    {
+      return h_space_;
+    }
+
+    virtual SmartPtr<const VectorSpace> x_space() const
+    {
+      return x_space_;
+    }
+    //@}
+
+    /** Accessor method for vector/matrix spaces pointers */
+    virtual void GetSpaces(SmartPtr<const VectorSpace>& x_space,
+                           SmartPtr<const VectorSpace>& c_space,
+                           SmartPtr<const VectorSpace>& d_space,
+                           SmartPtr<const VectorSpace>& x_l_space,
+                           SmartPtr<const MatrixSpace>& px_l_space,
+                           SmartPtr<const VectorSpace>& x_u_space,
+                           SmartPtr<const MatrixSpace>& px_u_space,
+                           SmartPtr<const VectorSpace>& d_l_space,
+                           SmartPtr<const MatrixSpace>& pd_l_space,
+                           SmartPtr<const VectorSpace>& d_u_space,
+                           SmartPtr<const MatrixSpace>& pd_u_space,
+                           SmartPtr<const MatrixSpace>& Jac_c_space,
+                           SmartPtr<const MatrixSpace>& Jac_d_space,
+                           SmartPtr<const SymMatrixSpace>& Hess_lagrangian_space);
+
+    /** Method for adapting the variable bounds.  This is called if
+     *  slacks are becoming too small */
+    virtual void AdjustVariableBounds(const Vector& new_x_L,
+                                      const Vector& new_x_U,
+                                      const Vector& new_d_L,
+                                      const Vector& new_d_U);
+
+    /** @name Counters for the number of function evaluations. */
+    //@{
+    virtual Index f_evals() const
+    {
+      return f_evals_;
+    }
+    virtual Index grad_f_evals() const
+    {
+      return grad_f_evals_;
+    }
+    virtual Index c_evals() const
+    {
+      return c_evals_;
+    }
+    virtual Index jac_c_evals() const
+    {
+      return jac_c_evals_;
+    }
+    virtual Index d_evals() const
+    {
+      return d_evals_;
+    }
+    virtual Index jac_d_evals() const
+    {
+      return jac_d_evals_;
+    }
+    virtual Index h_evals() const
+    {
+      return h_evals_;
+    }
+    //@}
+
+    /** Solution Routines - overloaded from IpoptNLP */
+    //@{
+    void FinalizeSolution(SolverReturn status,
+                          const Vector& x, const Vector& z_L, const Vector& z_U,
+                          const Vector& c, const Vector& d,
+                          const Vector& y_c, const Vector& y_d,
+                          Number obj_value,
+                          const IpoptData* ip_data,
+                          IpoptCalculatedQuantities* ip_cq);
+    bool IntermediateCallBack(AlgorithmMode mode,
+                              Index iter, Number obj_value,
+                              Number inf_pr, Number inf_du,
+                              Number mu, Number d_norm,
+                              Number regularization_size,
+                              Number alpha_du, Number alpha_pr,
+                              Index ls_trials,
+                              SmartPtr<const IpoptData> ip_data,
+                              SmartPtr<IpoptCalculatedQuantities> ip_cq);
+    //@}
+
+    /** @name Methods for IpoptType */
+    //@{
+    /** Called by IpoptType to register the options */
+    static void RegisterOptions(SmartPtr<RegisteredOptions> roptions);
+    //@}
+
+    /** Accessor method to the underlying NLP */
+    SmartPtr<NLP> nlp()
+    {
+      return nlp_;
+    }
+
+    /**@name Methods related to function evaluation timing.
+     */
+    //@{
+
+    /** Reset the timing statistics */
+    void ResetTimes();
+
+    void PrintTimingStatistics(Journalist& jnlst,
+                               EJournalLevel level,
+                               EJournalCategory category) const;
+
+    const TimedTask& f_eval_time() const
+    {
+      return f_eval_time_;
+    }
+    const TimedTask& grad_f_eval_time() const
+    {
+      return grad_f_eval_time_;
+    }
+    const TimedTask& c_eval_time() const
+    {
+      return c_eval_time_;
+    }
+    const TimedTask& jac_c_eval_time() const
+    {
+      return jac_c_eval_time_;
+    }
+    const TimedTask& d_eval_time() const
+    {
+      return d_eval_time_;
+    }
+    const TimedTask& jac_d_eval_time() const
+    {
+      return jac_d_eval_time_;
+    }
+    const TimedTask& h_eval_time() const
+    {
+      return h_eval_time_;
+    }
+
+    Number TotalFunctionEvaluationCpuTime() const;
+    Number TotalFunctionEvaluationSysTime() const;
+    Number TotalFunctionEvaluationWallclockTime() const;
+    //@}
+
+  private:
+    /** journalist */
+    SmartPtr<const Journalist> jnlst_;
+
+    /** Pointer to the NLP */
+    SmartPtr<NLP> nlp_;
+
+    /** Necessary Vector/Matrix spaces */
+    //@{
+    SmartPtr<const VectorSpace> x_space_;
+    SmartPtr<const VectorSpace> c_space_;
+    SmartPtr<const VectorSpace> d_space_;
+    SmartPtr<const VectorSpace> x_l_space_;
+    SmartPtr<const MatrixSpace> px_l_space_;
+    SmartPtr<const VectorSpace> x_u_space_;
+    SmartPtr<const MatrixSpace> px_u_space_;
+    SmartPtr<const VectorSpace> d_l_space_;
+    SmartPtr<const MatrixSpace> pd_l_space_;
+    SmartPtr<const VectorSpace> d_u_space_;
+    SmartPtr<const MatrixSpace> pd_u_space_;
+    SmartPtr<const MatrixSpace> jac_c_space_;
+    SmartPtr<const MatrixSpace> jac_d_space_;
+    SmartPtr<const SymMatrixSpace> h_space_;
+
+    SmartPtr<const MatrixSpace> scaled_jac_c_space_;
+    SmartPtr<const MatrixSpace> scaled_jac_d_space_;
+    SmartPtr<const SymMatrixSpace> scaled_h_space_;
+    //@}
+
+    /**@name Storage for Model Quantities */
+    //@{
+    /** Objective function */
+    CachedResults<Number> f_cache_;
+
+    /** Gradient of the objective function */
+    CachedResults<SmartPtr<const Vector> > grad_f_cache_;
+
+    /** Equality constraint residuals */
+    CachedResults<SmartPtr<const Vector> > c_cache_;
+
+    /** Jacobian Matrix for equality constraints
+     *  (current iteration) */
+    CachedResults<SmartPtr<const Matrix> > jac_c_cache_;
+
+    /** Inequality constraint residual (reformulated
+     *  as equalities with slacks) */
+    CachedResults<SmartPtr<const Vector> > d_cache_;
+
+    /** Jacobian Matrix for inequality constraints
+     *  (current iteration) */
+    CachedResults<SmartPtr<const Matrix> > jac_d_cache_;
+
+    /** Hessian of the Lagrangian
+     *  (current iteration) */
+    CachedResults<SmartPtr<const SymMatrix> > h_cache_;
+
+    /** Unscaled version of x vector */
+    CachedResults<SmartPtr<const Vector> > unscaled_x_cache_;
+
+    /** Lower bounds on x */
+    SmartPtr<const Vector> x_L_;
+
+    /** Permutation matrix (x_L_ -> x) */
+    SmartPtr<const Matrix> Px_L_;
+
+    /** Upper bounds on x */
+    SmartPtr<const Vector> x_U_;
+
+    /** Permutation matrix (x_U_ -> x) */
+    SmartPtr<const Matrix> Px_U_;
+
+    /** Lower bounds on d */
+    SmartPtr<const Vector> d_L_;
+
+    /** Permutation matrix (d_L_ -> d) */
+    SmartPtr<const Matrix> Pd_L_;
+
+    /** Upper bounds on d */
+    SmartPtr<const Vector> d_U_;
+
+    /** Permutation matrix (d_U_ -> d) */
+    SmartPtr<const Matrix> Pd_U_;
+
+    /** Original unmodified lower bounds on x */
+    SmartPtr<const Vector> orig_x_L_;
+
+    /** Original unmodified upper bounds on x */
+    SmartPtr<const Vector> orig_x_U_;
+    //@}
+
+    /**@name Default Compiler Generated Methods
+     * (Hidden to avoid implicit creation/calling).
+     * These methods are not implemented and
+     * we do not want the compiler to implement
+     * them for us, so we declare them private
+     * and do not define them.  This ensures that
+     * they will not be implicitly created/called. */
+    //@{
+    /** Default Constructor */
+    OrigIpoptNLP();
+
+    /** Copy Constructor */
+    OrigIpoptNLP(const OrigIpoptNLP&);
+
+    /** Overloaded Equals Operator */
+    void operator=(const OrigIpoptNLP&);
+    //@}
+
+    /** @name auxiliary functions */
+    //@{
+    /** relax the bounds by a relative move of bound_relax_factor.
+     *  Here, bound_relax_factor should be negative (or zero) for
+     *  lower bounds, and positive (or zero) for upper bounds.
+     */
+    void relax_bounds(Number bound_relax_factor, Vector& bounds);
+
+    /** Method for getting the unscaled version of the x vector */
+    SmartPtr<const Vector> get_unscaled_x(const Vector& x);
+    //@}
+
+    /** @name Algorithmic parameters */
+    //@{
+    /** relaxation factor for the bounds */
+    Number bound_relax_factor_;
+    /** Flag indicating whether the primal variables should be
+     *  projected back into the original bounds after optimization. */
+    bool honor_original_bounds_;
+    /** Flag indicating whether a TNLP with identical structure has
+     *  already been solved before. */
+    bool warm_start_same_structure_;
+    /** Flag indicating what Hessian information is to be used. */
+    HessianApproximationType hessian_approximation_;
+    /** Flag indicating in which space the Hessian is to be approximated. */
+    HessianApproximationSpace hessian_approximation_space_;
+    /** Flag indicating whether it is desired to check if there are
+     *  NaN or Inf entries in first and second derivative matrices. */
+    bool check_derivatives_for_naninf_;
+    /** Flag indicating if we need to ask for equality constraint
+     *  Jacobians only once */
+    bool jac_c_constant_;
+    /** Flag indicating if we need to ask for inequality constraint
+     *  Jacobians only once */
+    bool jac_d_constant_;
+    /** Flag indicating if we need to ask for the Hessian only once */
+    bool hessian_constant_;
+    //@}
+
+    /** @name Counters for the function evaluations */
+    //@{
+    Index f_evals_;
+    Index grad_f_evals_;
+    Index c_evals_;
+    Index jac_c_evals_;
+    Index d_evals_;
+    Index jac_d_evals_;
+    Index h_evals_;
+    //@}
+
+    /** Flag indicating if the initialization method has been called */
+    bool initialized_;
+
+    /**@name Timing statistics for the function evaluations. */
+    //@{
+    TimedTask f_eval_time_;
+    TimedTask grad_f_eval_time_;
+    TimedTask c_eval_time_;
+    TimedTask jac_c_eval_time_;
+    TimedTask d_eval_time_;
+    TimedTask jac_d_eval_time_;
+    TimedTask h_eval_time_;
+    //@}
+  };
+
+} // namespace Ipopt
+
+#endif
--
cgit
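
Note on the caching that the header above describes: OrigIpoptNLP keeps CachedResults<...> members (f_cache_, grad_f_cache_, jac_c_cache_, ...) so that repeated requests for the same iterate do not trigger another evaluation of the underlying NLP, and the *_evals_ counters only grow when the NLP callback is actually invoked. The stand-alone sketch below illustrates that idea only; the class CachedEval and its interface are invented for illustration and are not Ipopt's CachedResults API.

// Minimal sketch of result caching keyed on the evaluation point
// (illustrative only; not Ipopt's CachedResults API).
#include <cstdio>
#include <functional>
#include <vector>

class CachedEval {
public:
  explicit CachedEval(std::function<double(const std::vector<double>&)> f)
    : f_(f), have_value_(false) {}

  // Evaluate f at x, reusing the stored result if x is unchanged.
  double eval(const std::vector<double>& x) {
    if (!have_value_ || x != last_x_) {
      last_value_ = f_(x);   // expensive model evaluation
      last_x_ = x;
      have_value_ = true;
      ++n_evals_;            // mirrors the f_evals_ counter in the header
    }
    return last_value_;
  }

  int n_evals() const { return n_evals_; }

private:
  std::function<double(const std::vector<double>&)> f_;
  std::vector<double> last_x_;
  double last_value_;
  bool have_value_;
  int n_evals_ = 0;
};

int main() {
  CachedEval obj([](const std::vector<double>& x) { return x[0]*x[0] + x[1]; });
  std::vector<double> x = {1.0, 2.0};
  std::printf("%g\n", obj.eval(x));            // evaluates the callback
  std::printf("%g\n", obj.eval(x));            // served from the cache
  std::printf("evals = %d\n", obj.n_evals());  // prints 1
  return 0;
}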