author      Harpreet    2016-08-09 13:05:52 +0530
committer   Harpreet    2016-08-09 13:05:52 +0530
commit      d8fca69f239a275f5ffdbd870508c86b6e69c678 (patch)
tree        8e102f22a671d2b837c2a030911e6870e790fd41 /build
parent      9fd2976931c088dc523974afb901e96bad20f73c (diff)
parent      de5fe502b7240a48f9d46b9e210060de5c2b185e (diff)
Initial upload
Diffstat (limited to 'build')
-rw-r--r--   build/Scilab/intfmincon.sci      51
-rw-r--r--   build/cpp/cpp_intfmincon.cpp      4
-rw-r--r--   build/cpp/sci_iofunc.cpp          3
-rw-r--r--   build/cpp/sci_minconTMINLP.cpp  139
4 files changed, 101 insertions, 96 deletions
diff --git a/build/Scilab/intfmincon.sci b/build/Scilab/intfmincon.sci
index 9cf9116..0d6cf6d 100644
--- a/build/Scilab/intfmincon.sci
+++ b/build/Scilab/intfmincon.sci
@@ -207,14 +207,14 @@ function [xopt,fopt,exitflag,gradient,hessian] = intfmincon (varargin)
     //To check for correct size of A(3rd paramter)
     if(size(A,2)~=nbVar & size(A,2)~=0) then
-        errmsg = msprintf(gettext("%s: Expected Matrix of size (No of linear inequality constraints X No of Variables) or an Empty Matrix for Linear Inequality Constraint coefficient Matrix A"), solver_name);
+        errmsg = msprintf(gettext("%s: Expected Matrix of size (No of linear inequality constraints X No of Variables) or an Empty Matrix for Linear Inequality Constraint coefficient Matrix A"), intfmincon);
         error(errmsg);
     end
     nbConInEq=size(A,"r");

     //To check for the correct size of Aeq (5th paramter)
     if(size(Aeq,2)~=nbVar & size(Aeq,2)~=0) then
-        errmsg = msprintf(gettext("%s: Expected Matrix of size (No of linear equality constraints X No of Variables) or an Empty Matrix for Linear Equality Constraint coefficient Matrix Aeq"), solver_name);
+        errmsg = msprintf(gettext("%s: Expected Matrix of size (No of linear equality constraints X No of Variables) or an Empty Matrix for Linear Equality Constraint coefficient Matrix Aeq"), intfmincon);
         error(errmsg);
     end
     nbConEq=size(Aeq,"r");
@@ -340,18 +340,18 @@ options = list('integertolerance',1d-06,'maxnodes',2147483647,'cputime',1d10,'al
     if (type(nlc) == 13 | type(nlc) == 11) then
         if(execstr('[sample_c,sample_ceq] = nlc(x0)','errcatch')==21) then
-            errmsg = msprintf(gettext("%s: Non-Linear Constraint function and x0 did not match"), solver_name);
+            errmsg = msprintf(gettext("%s: Non-Linear Constraint function and x0 did not match"), intfmincon);
             error(errmsg);
         end
         numNlic = size(sample_c,"*");
         numNlec = size(sample_ceq,"*");
-        numNlc = no_nlic + no_nlec;
+        numNlc = numNlic + numNlec;
     end

     /////////////// Creating conLb and conUb ////////////////////////
-    conLb = [repmat(-%inf,nbConInEq,1);beq;repmat(-%inf,numNlic,1);repmat(0,numNlic,1);]
-    conUb = [b;beq;repmat(0,numNlic,1);repmat(0,numNlic,1);]
+    conLb = [repmat(-%inf,numNlic,1);repmat(0,numNlec,1);repmat(-%inf,nbConInEq,1);beq;]
+    conUb = [repmat(0,numNlic,1);repmat(0,numNlec,1);b;beq;]

     //Converting the User defined Objective function into Required form (Error Detectable)
     function [y,check] = _f(x)
@@ -386,17 +386,13 @@ options = list('integertolerance',1d-06,'maxnodes',2147483647,'cputime',1d10,'al
     endfunction

     function [y,check] = _addnlc(x)
-        x= x(:);
-        c=[]
-        ceq = [];
+        x= x(:)
         try
-            if(type(nlc) == 13 & numNlc~=0) then
-            {
-                [c,ceq] = nlc(x);
-            }
+            if((type(nlc) == 13 | type(nlc) == 11) & numNlc~=0) then
+                [c,ceq]=nlc(x)
             end
             ylin = [A*x;Aeq*x];
-            y = [ylin;c(:);ceq(:)];
+            y = [c(:);ceq(:);ylin(:);];
             [y,check] = checkIsreal(y)
         catch
             y=0;
@@ -404,11 +400,11 @@ options = list('integertolerance',1d-06,'maxnodes',2147483647,'cputime',1d10,'al
         end
     endfunction

-    //Defining an inbuilt Objective gradient function
+    //Defining an inbuilt jacobian of constraints function
     function [dy,check] = _gradnlc(x)
         if (options(16) =="on") then
             try
-                [y,dy]=_addnlc(x)
+                [y,dy]=nlc(x)
                 [dy,check] = checkIsreal(dy)
             catch
                 dy = 0;
@@ -427,6 +423,7 @@ options = list('integertolerance',1d-06,'maxnodes',2147483647,'cputime',1d10,'al

     //Defining a function to calculate Hessian if the respective user entry is OFF
     function [hessy,check]=_gradhess(x,obj_factor,lambda)
+        x=x(:);
         if (type(options(14)) == "function") then
             try
                 [obj,dy,hessy] = fun(x,obj_factor,lambda)
@@ -437,17 +434,15 @@ options = list('integertolerance',1d-06,'maxnodes',2147483647,'cputime',1d10,'al
             end
         else
             try
-                [dy,hessfy]=numderivative(_f,x,%eps^(1/3),1,"blockmat");
-                hessny = []
-                if(type(nlc) == 13 & numNlc~=0) then
-                {
-                    [dy,hessny] = numderivative(_addnlc,x,%eps^(1/3),1,"blockmat");
-                }
+                [dy,hessfy]=numderivative(_f,x)
+                hessfy = matrix(hessfy,nbVar,nbVar)
+                if((type(nlc) == 13 | type(nlc) == 11) & numNlc~=0) then
+                    [dy,hessny]=numderivative(nlc,x)
                 end
                 hessianc = []
-                for i = 1:numNlc
-                    hessianc = hessianc + lambda(i)*hessny((i-1)*nbVar+1:nbVar*i,:)
-                end
+                for i = 1:numNlc
+                    hessianc = hessianc + lambda(i)*matrix(hessny(i,:),nbVar,nbVar)
+                end
                 hessy = obj_factor*hessfy + hessianc;
                 [hessy,check] = checkIsreal(hessy)
             catch
@@ -457,7 +452,7 @@ options = list('integertolerance',1d-06,'maxnodes',2147483647,'cputime',1d10,'al
         end
     endfunction

-    intconsize = size(intcon,"*");
+    intconsize = size(intcon,"*")

     [xopt,fopt,exitflag] = inter_fmincon(_f,_gradf,_addnlc,_gradnlc,_gradhess,x0,lb,ub,conLb,conUb,intcon,options,nbConInEq+nbConEq);

@@ -466,7 +461,7 @@ options = list('integertolerance',1d-06,'maxnodes',2147483647,'cputime',1d10,'al
         gradient = [];
         hessian = [];
     else
-        [ gradient, hessian] = numderivative(_f, xopt, [], [], "blockmat");
+        [ gradient, hessian] = numderivative(_f, xopt)
     end

     //To print output message
@@ -495,7 +490,7 @@ function [y, check] = checkIsreal(x)
         y = 0
         check=1;
     else
-        y = x;
+        y = x;
         check=0;
     end
 endfunction
diff --git a/build/cpp/cpp_intfmincon.cpp b/build/cpp/cpp_intfmincon.cpp
index 50270cf..d921128 100644
--- a/build/cpp/cpp_intfmincon.cpp
+++ b/build/cpp/cpp_intfmincon.cpp
@@ -121,6 +121,7 @@ int cpp_intfmincon(char *fname)
     BonminSetup bonmin;
     bonmin.initializeOptionsAndJournalist();
     bonmin.options()->SetStringValue("mu_oracle","loqo");
+    bonmin.options()->SetIntegerValue("bonmin.print_level",5);
     bonmin.options()->SetNumericValue("bonmin.integer_tolerance", *integertolerance);
     bonmin.options()->SetIntegerValue("bonmin.node_limit", (int)*maxnodes);
     bonmin.options()->SetNumericValue("bonmin.time_limit", *cputime);
@@ -136,13 +137,10 @@ int cpp_intfmincon(char *fname)
         bb(bonmin);//process parameter file using Ipopt and do branch and bound using Cbc
     }
     catch(TNLPSolver::UnsolvedError *E) {
-        Scierror(999, "\nIpopt has failed to solve the problem!\n");
     }
     catch(OsiTMINLPInterface::SimpleError &E) {
-        Scierror(999, "\nFailed to solve a problem!\n");
     }
     catch(CoinError &E) {
-        Scierror(999, "\nFailed to solve a problem!\n");
     }
     rstatus=tminlp->returnStatus();
diff --git a/build/cpp/sci_iofunc.cpp b/build/cpp/sci_iofunc.cpp
index 259f7c3..f05839c 100644
--- a/build/cpp/sci_iofunc.cpp
+++ b/build/cpp/sci_iofunc.cpp
@@ -258,8 +258,7 @@ bool getHessFromScilab(int n,int numConstr_,char name[], double *x,double *obj,d
     if(getDoubleMatrixFromScilab(posFirstElementOnStackForSF, &x_rows, &x_cols, dest))
     {
         sciprint("No results ");
-        return 1;
-
+        return 1;
     }
     }
     return 0;
diff --git a/build/cpp/sci_minconTMINLP.cpp b/build/cpp/sci_minconTMINLP.cpp
index ac688d4..7885083 100644
--- a/build/cpp/sci_minconTMINLP.cpp
+++ b/build/cpp/sci_minconTMINLP.cpp
@@ -27,7 +27,7 @@ extern "C"
 using namespace Ipopt;
 using namespace Bonmin;

-#define DEBUG 0
+//#define DEBUG 0

 minconTMINLP::~minconTMINLP()
 {
@@ -37,6 +37,9 @@ minconTMINLP::~minconTMINLP()
 // Set the type of every variable - CONTINUOUS or INTEGER
 bool minconTMINLP::get_variables_types(Index n, VariableType* var_types)
 {
+    #ifdef DEBUG
+    sciprint("Code is in get_variables_types\n");
+    #endif
     n = numVars_;
     for(int i=0; i < n; i++)
         var_types[i] = CONTINUOUS;
@@ -48,6 +51,9 @@ bool minconTMINLP::get_variables_types(Index n, VariableType* var_types)
 // The linearity of the variables - LINEAR or NON_LINEAR
 bool minconTMINLP::get_variables_linearity(Index n, Ipopt::TNLP::LinearityType* var_types)
 {
+    #ifdef DEBUG
+    sciprint("Code is in get_variables_linearity\n");
+    #endif
     for(int i=0;i<n;i++)
     {
         var_types[i] = Ipopt::TNLP::NON_LINEAR;
@@ -57,6 +63,10 @@ bool minconTMINLP::get_variables_linearity(Index n, Ipopt::TNLP::LinearityType*
 // The linearity of the constraints - LINEAR or NON_LINEAR
 bool minconTMINLP::get_constraints_linearity(Index m, Ipopt::TNLP::LinearityType* const_types)
 {
+
+    #ifdef DEBUG
+    sciprint("Code is in get_constraints_linearity\n");
+    #endif
     for(int i=0;i<numLC_;i++)
     {
         const_types[i] = Ipopt::TNLP::LINEAR;
@@ -71,6 +81,9 @@ bool minconTMINLP::get_constraints_linearity(Index m, Ipopt::TNLP::LinearityType
 //get NLP info such as number of variables,constraints,no.of elements in jacobian and hessian to allocate memory
 bool minconTMINLP::get_nlp_info(Index& n, Index& m, Index& nnz_jac_g, Index& nnz_h_lag, TNLP::IndexStyleEnum& index_style)
 {
+    #ifdef DEBUG
+    sciprint("Code is in get_nlp_info\n");
+    #endif
     n=numVars_; // Number of variables
     m=numCons_; // Number of constraints
     nnz_jac_g = n*m; // No. of elements in Jacobian of constraints
@@ -99,6 +112,57 @@ bool minconTMINLP::get_bounds_info(Index n, Number* x_l, Number* x_u, Index m, N
     return true;
 }

+// This method sets initial values for required vectors . For now we are assuming 0 to all values.
+bool minconTMINLP::get_starting_point(Index n, bool init_x, Number* x,bool init_z, Number* z_L, Number* z_U,Index m, bool init_lambda,Number* lambda)
+{
+    assert(init_x == true);
+    assert(init_z == false);
+    assert(init_lambda == false);
+    if (init_x == true)
+    { //we need to set initial values for vector x
+        for (Index var=0;var<n;var++)
+        {x[var]=x0_[var];}//initialize with 0.
+    }
+    return true;
+}
+
+//get value of objective function at vector x
+bool minconTMINLP::eval_f(Index n, const Number* x, bool new_x, Number& obj_value)
+{
+    #ifdef DEBUG
+    sciprint("Code is eval_f\n");
+    #endif
+    char name[20]="_f";
+    Number *obj;
+    if (getFunctionFromScilab(n,name,x, 7, 1,2,&obj))
+    {
+        return false;
+    }
+    obj_value = *obj;
+    return true;
+}
+
+//get value of gradient of objective function at vector x.
+bool minconTMINLP::eval_grad_f(Index n, const Number* x, bool new_x, Number* grad_f)
+{
+    #ifdef DEBUG
+    sciprint("Code is in eval_grad_f\n");
+    #endif
+    char name[20]="_gradf";
+    Number *resg;
+    if (getFunctionFromScilab(n,name,x, 7, 1,2,&resg))
+    {
+        return false;
+    }
+
+    Index i;
+    for(i=0;i<numVars_;i++)
+    {
+        grad_f[i]=resg[i];
+    }
+    return true;
+}
+
 // return the value of the constraints: g(x)
 bool minconTMINLP::eval_g(Index n, const Number* x, bool new_x, Index m, Number* g)
 {
@@ -145,7 +209,7 @@ bool minconTMINLP::eval_jac_g(Index n, const Number* x, bool new_x,Index m, Inde
     else
     {
         unsigned int i,j,idx=0;
-        for(int i=0;i<m;i++)
+        for(i=0;i<m;i++)
             for(j=0;j<n;j++)
             {
                 iRow[idx]=i;
@@ -182,58 +246,6 @@ bool minconTMINLP::eval_jac_g(Index n, const Number* x, bool new_x,Index m, Inde
     return true;
 }

-//get value of objective function at vector x
-bool minconTMINLP::eval_f(Index n, const Number* x, bool new_x, Number& obj_value)
-{
-    #ifdef DEBUG
-    sciprint("Code is eval_f\n");
-    #endif
-    char name[20]="_f";
-    Number *obj;
-    if (getFunctionFromScilab(n,name,x, 7, 1,2,&obj))
-    {
-        return false;
-    }
-    obj_value = *obj;
-    return true;
-}
-
-//get value of gradient of objective function at vector x.
-bool minconTMINLP::eval_grad_f(Index n, const Number* x, bool new_x, Number* grad_f)
-{
-    #ifdef DEBUG
-    sciprint("Code is in eval_grad_f\n");
-    #endif
-    char name[20]="_gradf";
-    Number *resg;
-    if (getFunctionFromScilab(n,name,x, 7, 1,2,&resg))
-    {
-        return false;
-    }
-
-    Index i;
-    for(i=0;i<numVars_;i++)
-    {
-        grad_f[i]=resg[i];
-    }
-
-    return true;
-}
-
-// This method sets initial values for required vectors . For now we are assuming 0 to all values.
-bool minconTMINLP::get_starting_point(Index n, bool init_x, Number* x,bool init_z, Number* z_L, Number* z_U,Index m, bool init_lambda,Number* lambda)
-{
-    assert(init_x == true);
-    assert(init_z == false);
-    assert(init_lambda == false);
-    if (init_x == true)
-    { //we need to set initial values for vector x
-        for (Index var=0;var<n;var++)
-        {x[var]=x0_[var];}//initialize with 0.
-    }
-    return true;
-}
-
 /*
  * Return either the sparsity structure of the Hessian of the Lagrangian,
  * or the values of the Hessian of the Lagrangian for the given values for
@@ -251,26 +263,27 @@ bool minconTMINLP::eval_h(Index n, const Number* x, bool new_x,Number obj_factor
         Index idx=0;
         for (Index row = 0; row < numVars_; row++)
         {
-            for (Index col = 0; col <= row; col++)
-            {   iRow[idx] = row;
+            for (Index col = 0; col < numVars_; col++)
+            {
+                iRow[idx] = row;
                 jCol[idx] = col;
                 idx++;
             }
         }
-    }
+    }
     else
     {
         char name[20]="_gradhess";
-        Number *resh;
-        if (getHessFromScilab(n,m,name,x, &obj_factor, lambda, 7, 3,2,&resh))
+        Number *resCh;
+        if (getHessFromScilab(n,m,name,x, &obj_factor, lambda, 7, 3,2,&resCh))
         {
             return false;
         }
         Index index=0;
         for (Index row=0;row < numVars_ ;++row)
         {
-            for (Index col=0; col <= row; ++col)
+            for (Index col=0; col < numVars_; ++col)
             {
-                values[index++]=(resh[numVars_*row+col]);
+                values[index++]=resCh[numVars_*row+col];
             }
         }
     }
@@ -281,18 +294,18 @@ void minconTMINLP::finalize_solution(SolverReturn status,Index n, const Number*
 {
     #ifdef DEBUG
     sciprint("Code is in finalize_solution\n");
+    sciprint("%d",status);
     #endif
     finalObjVal_ = obj_value;
     status_ = status;
     if(status==0 ||status== 3)
     {
-        finalX_ = (double*)malloc(sizeof(double) * numVars_ * 1);
+        finalX_ = (double*)malloc(sizeof(double) * numVars_*1);
         for (Index i=0; i<numVars_; i++)
         {
             finalX_[i] = x[i];
         }
     }
-
 }

 const double * minconTMINLP::getX()
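Note on the main behavioural change above: in intfmincon.sci the constraint stack handed to Bonmin is reordered so that _addnlc now returns the nonlinear inequalities c(x), then the nonlinear equalities ceq(x), then the linear parts A*x and Aeq*x, with conLb and conUb rebuilt in that same order (equalities are encoded as coinciding lower and upper bounds). The standalone Scilab sketch below is illustrative only; the example problem and the nlc function are hypothetical and not taken from the toolbox, it just shows how the bounds and the stacked constraint vector line up under the new ordering.

// Hypothetical example data (not part of this commit): two variables, one
// nonlinear inequality c(x) <= 0, one nonlinear equality ceq(x) = 0, one
// linear inequality A*x <= b and one linear equality Aeq*x = beq.
function [c, ceq] = nlc(x)
    c   = [x(1)^2 + x(2)^2 - 4];
    ceq = [x(1)*x(2) - 1];
endfunction

A   = [1 1];  b   = [3];
Aeq = [1 -1]; beq = [0];
x0  = [0.5; 0.5];

// Sizes, mirroring the variable names used in intfmincon.sci
[sample_c, sample_ceq] = nlc(x0);
numNlic   = size(sample_c, "*");
numNlec   = size(sample_ceq, "*");
nbConInEq = size(A, "r");

// Bounds built in the patched order: nonlinear inequalities, nonlinear
// equalities, linear inequalities, linear equalities.
conLb = [repmat(-%inf, numNlic, 1); repmat(0, numNlec, 1); repmat(-%inf, nbConInEq, 1); beq];
conUb = [repmat(0, numNlic, 1);     repmat(0, numNlec, 1); b;                           beq];

// The constraint vector is stacked the same way, matching what _addnlc now returns.
g = [sample_c(:); sample_ceq(:); A*x0; Aeq*x0];
disp([conLb, g, conUb]);   // each row: lower bound, g_i(x0), upper bound

Because Bonmin/Ipopt treat every constraint as box-bounded, keeping the nonlinear block first matches the order in which the patched _addnlc concatenates its outputs, so the evaluated constraints stay aligned with conLb and conUb.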