path: root/build/cpp
author     Harpreet    2016-08-04 15:25:44 +0530
committer  Harpreet    2016-08-04 15:25:44 +0530
commit     9fd2976931c088dc523974afb901e96bad20f73c (patch)
tree       22502de6e6988d5cd595290d11266f8432ad825b /build/cpp
download   FOSSEE-Optim-toolbox-development-9fd2976931c088dc523974afb901e96bad20f73c.tar.gz
           FOSSEE-Optim-toolbox-development-9fd2976931c088dc523974afb901e96bad20f73c.tar.bz2
           FOSSEE-Optim-toolbox-development-9fd2976931c088dc523974afb901e96bad20f73c.zip
initial add
Diffstat (limited to 'build/cpp')
-rw-r--r--  build/cpp/cpp_intfminbnd.cpp     172
-rw-r--r--  build/cpp/cpp_intfmincon.cpp     191
-rw-r--r--  build/cpp/cpp_intfminunc.cpp     174
-rw-r--r--  build/cpp/minbndTMINLP.hpp       114
-rw-r--r--  build/cpp/minconTMINLP.hpp       124
-rw-r--r--  build/cpp/minuncTMINLP.hpp       113
-rw-r--r--  build/cpp/sci_iofunc.cpp         334
-rw-r--r--  build/cpp/sci_iofunc.hpp          25
-rw-r--r--  build/cpp/sci_minbndTMINLP.cpp   218
-rw-r--r--  build/cpp/sci_minconTMINLP.cpp   311
-rw-r--r--  build/cpp/sci_minconTMINLP.cpp~  267
-rw-r--r--  build/cpp/sci_minuncTMINLP.cpp   237
12 files changed, 2280 insertions, 0 deletions
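
The three cpp_intf* gateways added below (cpp_intfminbnd.cpp, cpp_intfmincon.cpp, cpp_intfminunc.cpp) share one driver pattern: read the problem data and solver options from the Scilab stack, wrap them in a problem-specific TMINLP, configure a BonminSetup, and run branch and bound. A minimal sketch of that shared skeleton, assuming Bonmin/Ipopt are installed; solveWithBonmin itself is not part of this commit:

    // Sketch only: the Bonmin driver skeleton common to the three gateways below.
    // `problem` would be one of the TMINLP subclasses added in this commit
    // (minbndTMINLP, minconTMINLP, minuncTMINLP).
    #include "BonTMINLP.hpp"
    #include "BonBonminSetup.hpp"
    #include "BonCbc.hpp"
    #include "BonOsiTMINLPInterface.hpp"
    #include "CoinError.hpp"

    int solveWithBonmin(Ipopt::SmartPtr<Bonmin::TMINLP> problem, double integertolerance,
                        int maxnodes, double cputime, double allowablegap, int max_iter)
    {
        Bonmin::BonminSetup bonmin;
        bonmin.initializeOptionsAndJournalist();

        // The same options the gateways expose through their params list
        bonmin.options()->SetNumericValue("bonmin.integer_tolerance", integertolerance);
        bonmin.options()->SetIntegerValue("bonmin.node_limit", maxnodes);
        bonmin.options()->SetNumericValue("bonmin.time_limit", cputime);
        bonmin.options()->SetNumericValue("bonmin.allowable_gap", allowablegap);
        bonmin.options()->SetIntegerValue("bonmin.iteration_limit", max_iter);

        bonmin.initialize(Ipopt::GetRawPtr(problem));

        try {
            Bonmin::Bab bb;
            bb(bonmin);   // Cbc branch and bound; Ipopt solves the NLP relaxations
        }
        catch (Bonmin::TNLPSolver::UnsolvedError *)       { return 1; }
        catch (Bonmin::OsiTMINLPInterface::SimpleError &) { return 1; }
        catch (CoinError &)                               { return 1; }
        return 0;
    }
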
diff --git a/build/cpp/cpp_intfminbnd.cpp b/build/cpp/cpp_intfminbnd.cpp
new file mode 100644
index 0000000..4914111
--- /dev/null
+++ b/build/cpp/cpp_intfminbnd.cpp
@@ -0,0 +1,172 @@
+// Copyright (C) 2016 - IIT Bombay - FOSSEE
+//
+// This file must be used under the terms of the CeCILL.
+// This source file is licensed as described in the file COPYING, which
+// you should have received as part of this distribution. The terms
+// are also available at
+// http://www.cecill.info/licences/Licence_CeCILL_V2-en.txt
+// Author: Harpreet Singh
+// Organization: FOSSEE, IIT Bombay
+// Email: toolbox@scilab.in
+
+#include "CoinPragma.hpp"
+#include "CoinTime.hpp"
+#include "CoinError.hpp"
+
+#include "BonOsiTMINLPInterface.hpp"
+#include "BonIpoptSolver.hpp"
+#include "minbndTMINLP.hpp"
+#include "BonCbc.hpp"
+#include "BonBonminSetup.hpp"
+
+#include "BonOACutGenerator2.hpp"
+#include "BonEcpCuts.hpp"
+#include "BonOaNlpOptim.hpp"
+
+#include "sci_iofunc.hpp"
+extern "C"
+{
+#include "call_scilab.h"
+#include <api_scilab.h>
+#include <Scierror.h>
+#include <BOOL.h>
+#include <localization.h>
+#include <sciprint.h>
+
+int cpp_intfminbnd(char *fname)
+{
+ using namespace Ipopt;
+ using namespace Bonmin;
+
+ CheckInputArgument(pvApiCtx, 8, 8);
+ CheckOutputArgument(pvApiCtx, 3, 3);
+
+ // Input arguments
+ Number *integertolerance=NULL, *maxnodes=NULL, *allowablegap=NULL, *cputime=NULL,*max_iter=NULL, *lb = NULL, *ub = NULL;
+ static unsigned int nVars = 0;
+ unsigned int temp1 = 0,temp2 = 0, iret = 0;
+ int x0_rows, x0_cols,intconSize;
+ Number *intcon = NULL,*options=NULL, *ifval=NULL;
+
+ // Output arguments
+ Number *fX = NULL, ObjVal=0,iteration=0,cpuTime=0,fobj_eval=0;
+ Number dual_inf, constr_viol, complementarity, kkt_error;
+ int rstatus = 0;
+
+ if(getDoubleMatrixFromScilab(4, &x0_rows, &x0_cols, &lb))
+ {
+ return 1;
+ }
+
+ if(getDoubleMatrixFromScilab(5, &x0_rows, &x0_cols, &ub))
+ {
+ return 1;
+ }
+
+ // Getting intcon
+ if (getDoubleMatrixFromScilab(6,&intconSize,&temp2,&intcon))
+ {
+ return 1;
+ }
+
+ //Initialization of parameters
+ nVars=x0_rows;
+ temp1 = 1;
+ temp2 = 1;
+
+ //Getting parameters
+ if (getFixedSizeDoubleMatrixInList(7,2,temp1,temp2,&integertolerance))
+ {
+ return 1;
+ }
+ if (getFixedSizeDoubleMatrixInList(7,4,temp1,temp2,&maxnodes))
+ {
+ return 1;
+ }
+ if (getFixedSizeDoubleMatrixInList(7,6,temp1,temp2,&cputime))
+ {
+ return 1;
+ }
+ if (getFixedSizeDoubleMatrixInList(7,8,temp1,temp2,&allowablegap))
+ {
+ return 1;
+ }
+ if (getFixedSizeDoubleMatrixInList(7,10,temp1,temp2,&max_iter))
+ {
+ return 1;
+ }
+
+ SmartPtr<minbndTMINLP> tminlp = new minbndTMINLP(nVars,lb,ub,intconSize,intcon);
+
+ BonminSetup bonmin;
+ bonmin.initializeOptionsAndJournalist();
+
+ bonmin.options()->SetStringValue("mu_oracle","loqo");
+ bonmin.options()->SetNumericValue("bonmin.integer_tolerance", *integertolerance);
+ bonmin.options()->SetIntegerValue("bonmin.node_limit", (int)*maxnodes);
+ bonmin.options()->SetNumericValue("bonmin.time_limit", *cputime);
+ bonmin.options()->SetNumericValue("bonmin.allowable_gap", *allowablegap);
+ bonmin.options()->SetIntegerValue("bonmin.iteration_limit", (int)*max_iter);
+
+ //Now initialize from tminlp
+ bonmin.initialize(GetRawPtr(tminlp));
+
+ //Set up done, now let's branch and bound
+ try {
+ Bab bb;
+	bb(bonmin); // run branch and bound with Cbc; Ipopt solves the NLP relaxations
+ }
+ catch(TNLPSolver::UnsolvedError *E) {
+ Scierror(999, "\nIpopt has failed to solve the problem!\n");
+ }
+ catch(OsiTMINLPInterface::SimpleError &E) {
+ Scierror(999, "\nFailed to solve a problem!\n");
+ }
+ catch(CoinError &E) {
+ Scierror(999, "\nFailed to solve a problem!\n");
+ }
+ rstatus=tminlp->returnStatus();
+
+ if(rstatus==0 ||rstatus== 3)
+ {
+ fX = tminlp->getX();
+ ObjVal = tminlp->getObjVal();
+ if (returnDoubleMatrixToScilab(1, nVars, 1, fX))
+ {
+ return 1;
+ }
+
+ if (returnDoubleMatrixToScilab(2, 1, 1, &ObjVal))
+ {
+ return 1;
+ }
+
+ if (returnIntegerMatrixToScilab(3, 1, 1, &rstatus))
+ {
+ return 1;
+ }
+
+ }
+ else
+ {
+ if (returnDoubleMatrixToScilab(1, 0, 0, fX))
+ {
+ return 1;
+ }
+
+ if (returnDoubleMatrixToScilab(2, 1, 1, &ObjVal))
+ {
+ return 1;
+ }
+
+ if (returnIntegerMatrixToScilab(3, 1, 1, &rstatus))
+ {
+ return 1;
+ }
+
+ }
+
+ return 0;
+ }
+}
+
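
In cpp_intfminbnd.cpp above (and in the other two gateways) the solver parameters arrive as a Scilab list whose even positions 2, 4, 6, 8 and 10 hold integertolerance, maxnodes, cputime, allowablegap and max_iter; only the list's stack position differs (7 here, 12 in cpp_intfmincon.cpp, 6 in cpp_intfminunc.cpp). A hypothetical helper (readBonminParams is not part of this commit) that factors out that repeated pattern on top of the sci_iofunc helpers added later in this diff:

    // Hypothetical helper, not in this commit: reads the five solver parameters out of
    // the options list at Scilab argument position `listPos`. The even list positions
    // hold the values; the odd positions (not read here) presumably hold the option names.
    #include "sci_iofunc.hpp"

    struct BonminParams
    {
        double *integertolerance, *maxnodes, *cputime, *allowablegap, *max_iter;
    };

    static int readBonminParams(int listPos, BonminParams &p)
    {
        if (getFixedSizeDoubleMatrixInList(listPos,  2, 1, 1, &p.integertolerance)) return 1;
        if (getFixedSizeDoubleMatrixInList(listPos,  4, 1, 1, &p.maxnodes))         return 1;
        if (getFixedSizeDoubleMatrixInList(listPos,  6, 1, 1, &p.cputime))          return 1;
        if (getFixedSizeDoubleMatrixInList(listPos,  8, 1, 1, &p.allowablegap))     return 1;
        if (getFixedSizeDoubleMatrixInList(listPos, 10, 1, 1, &p.max_iter))         return 1;
        return 0;
    }
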
diff --git a/build/cpp/cpp_intfmincon.cpp b/build/cpp/cpp_intfmincon.cpp
new file mode 100644
index 0000000..50270cf
--- /dev/null
+++ b/build/cpp/cpp_intfmincon.cpp
@@ -0,0 +1,191 @@
+// Copyright (C) 2016 - IIT Bombay - FOSSEE
+//
+// This file must be used under the terms of the CeCILL.
+// This source file is licensed as described in the file COPYING, which
+// you should have received as part of this distribution. The terms
+// are also available at
+// http://www.cecill.info/licences/Licence_CeCILL_V2-en.txt
+// Author: Harpreet Singh
+// Organization: FOSSEE, IIT Bombay
+// Email: toolbox@scilab.in
+
+#include "CoinPragma.hpp"
+#include "CoinTime.hpp"
+#include "CoinError.hpp"
+
+#include "BonOsiTMINLPInterface.hpp"
+#include "BonIpoptSolver.hpp"
+#include "minconTMINLP.hpp"
+#include "BonCbc.hpp"
+#include "BonBonminSetup.hpp"
+
+#include "BonOACutGenerator2.hpp"
+#include "BonEcpCuts.hpp"
+#include "BonOaNlpOptim.hpp"
+
+#include "sci_iofunc.hpp"
+extern "C"
+{
+#include "call_scilab.h"
+#include <api_scilab.h>
+#include <Scierror.h>
+#include <BOOL.h>
+#include <localization.h>
+#include <sciprint.h>
+
+int cpp_intfmincon(char *fname)
+{
+ using namespace Ipopt;
+ using namespace Bonmin;
+
+ CheckInputArgument(pvApiCtx, 13, 13);
+ CheckOutputArgument(pvApiCtx, 3, 3);
+
+ // Input arguments
+ Number *integertolerance=NULL, *maxnodes=NULL, *allowablegap=NULL, *cputime=NULL,*max_iter=NULL;
+ Number *x0 = NULL, *lb = NULL, *ub = NULL,*conLb = NULL, *conUb = NULL,*LC = NULL;
+ static unsigned int nVars = 0,nCons = 0;
+ unsigned int temp1 = 0,temp2 = 0, iret = 0;
+ int x0_rows, x0_cols,intconSize;
+ Number *intcon = NULL,*options=NULL, *ifval=NULL;
+
+ // Output arguments
+ Number *fX = NULL, ObjVal=0,iteration=0,cpuTime=0,fobj_eval=0;
+ Number dual_inf, constr_viol, complementarity, kkt_error;
+ int rstatus = 0;
+
+ if(getDoubleMatrixFromScilab(6, &nVars, &x0_cols, &x0))
+ {
+ return 1;
+ }
+
+ if(getDoubleMatrixFromScilab(7, &x0_rows, &x0_cols, &lb))
+ {
+ return 1;
+ }
+
+ if(getDoubleMatrixFromScilab(8, &x0_rows, &x0_cols, &ub))
+ {
+ return 1;
+ }
+
+ if(getDoubleMatrixFromScilab(9, &nCons, &x0_cols, &conLb))
+ {
+ return 1;
+ }
+
+ if(getDoubleMatrixFromScilab(10, &x0_rows, &x0_cols, &conUb))
+ {
+ return 1;
+ }
+
+ // Getting intcon
+ if (getDoubleMatrixFromScilab(11,&intconSize,&temp2,&intcon))
+ {
+ return 1;
+ }
+
+ if (getDoubleMatrixFromScilab(13,&temp1,&temp2,&LC))
+ {
+ return 1;
+ }
+
+ //Initialization of parameters
+ temp1 = 1;
+ temp2 = 1;
+
+ //Getting parameters
+ if (getFixedSizeDoubleMatrixInList(12,2,temp1,temp2,&integertolerance))
+ {
+ return 1;
+ }
+ if (getFixedSizeDoubleMatrixInList(12,4,temp1,temp2,&maxnodes))
+ {
+ return 1;
+ }
+ if (getFixedSizeDoubleMatrixInList(12,6,temp1,temp2,&cputime))
+ {
+ return 1;
+ }
+ if (getFixedSizeDoubleMatrixInList(12,8,temp1,temp2,&allowablegap))
+ {
+ return 1;
+ }
+ if (getFixedSizeDoubleMatrixInList(12,10,temp1,temp2,&max_iter))
+ {
+ return 1;
+ }
+
+	SmartPtr<minconTMINLP> tminlp = new minconTMINLP(nVars,x0,lb,ub,(unsigned int)*LC,nCons,conLb,conUb,intconSize,intcon); // *LC holds the number of linear constraints (read as a 1x1 double)
+
+ BonminSetup bonmin;
+ bonmin.initializeOptionsAndJournalist();
+ bonmin.options()->SetStringValue("mu_oracle","loqo");
+ bonmin.options()->SetNumericValue("bonmin.integer_tolerance", *integertolerance);
+ bonmin.options()->SetIntegerValue("bonmin.node_limit", (int)*maxnodes);
+ bonmin.options()->SetNumericValue("bonmin.time_limit", *cputime);
+ bonmin.options()->SetNumericValue("bonmin.allowable_gap", *allowablegap);
+ bonmin.options()->SetIntegerValue("bonmin.iteration_limit", (int)*max_iter);
+
+ //Now initialize from tminlp
+ bonmin.initialize(GetRawPtr(tminlp));
+
+ //Set up done, now let's branch and bound
+ try {
+ Bab bb;
+	bb(bonmin); // run branch and bound with Cbc; Ipopt solves the NLP relaxations
+ }
+ catch(TNLPSolver::UnsolvedError *E) {
+ Scierror(999, "\nIpopt has failed to solve the problem!\n");
+ }
+ catch(OsiTMINLPInterface::SimpleError &E) {
+ Scierror(999, "\nFailed to solve a problem!\n");
+ }
+ catch(CoinError &E) {
+ Scierror(999, "\nFailed to solve a problem!\n");
+ }
+ rstatus=tminlp->returnStatus();
+
+ if(rstatus==0 ||rstatus== 3)
+ {
+ fX = tminlp->getX();
+ ObjVal = tminlp->getObjVal();
+ if (returnDoubleMatrixToScilab(1, nVars, 1, fX))
+ {
+ return 1;
+ }
+
+ if (returnDoubleMatrixToScilab(2, 1, 1, &ObjVal))
+ {
+ return 1;
+ }
+
+ if (returnIntegerMatrixToScilab(3, 1, 1, &rstatus))
+ {
+ return 1;
+ }
+
+ }
+ else
+ {
+ if (returnDoubleMatrixToScilab(1, 0, 0, fX))
+ {
+ return 1;
+ }
+
+ if (returnDoubleMatrixToScilab(2, 1, 1, &ObjVal))
+ {
+ return 1;
+ }
+
+ if (returnIntegerMatrixToScilab(3, 1, 1, &rstatus))
+ {
+ return 1;
+ }
+
+ }
+
+ return 0;
+ }
+}
+
diff --git a/build/cpp/cpp_intfminunc.cpp b/build/cpp/cpp_intfminunc.cpp
new file mode 100644
index 0000000..233ead3
--- /dev/null
+++ b/build/cpp/cpp_intfminunc.cpp
@@ -0,0 +1,174 @@
+// Copyright (C) 2016 - IIT Bombay - FOSSEE
+//
+// This file must be used under the terms of the CeCILL.
+// This source file is licensed as described in the file COPYING, which
+// you should have received as part of this distribution. The terms
+// are also available at
+// http://www.cecill.info/licences/Licence_CeCILL_V2-en.txt
+// Author: Harpreet Singh, Pranav Deshpande and Akshay Miterani
+// Organization: FOSSEE, IIT Bombay
+// Email: toolbox@scilab.in
+
+#include "CoinPragma.hpp"
+#include "CoinTime.hpp"
+#include "CoinError.hpp"
+
+#include "BonOsiTMINLPInterface.hpp"
+#include "BonIpoptSolver.hpp"
+#include "minuncTMINLP.hpp"
+#include "BonCbc.hpp"
+#include "BonBonminSetup.hpp"
+
+#include "BonOACutGenerator2.hpp"
+#include "BonEcpCuts.hpp"
+#include "BonOaNlpOptim.hpp"
+
+#include "sci_iofunc.hpp"
+extern "C"
+{
+#include "call_scilab.h"
+#include <api_scilab.h>
+#include <Scierror.h>
+#include <BOOL.h>
+#include <localization.h>
+#include <sciprint.h>
+
+int cpp_intfminunc(char *fname)
+{
+ using namespace Ipopt;
+ using namespace Bonmin;
+
+	CheckInputArgument(pvApiCtx, 8, 8); // We need a total of 8 input arguments.
+ CheckOutputArgument(pvApiCtx, 3, 3); // 3 output arguments
+
+ //Function pointers, input matrix(Starting point) pointer, flag variable
+ int* funptr=NULL;
+ double* x0ptr=NULL;
+
+ // Input arguments
+ Number *integertolerance=NULL, *maxnodes=NULL, *allowablegap=NULL, *cputime=NULL,*max_iter=NULL;
+ static unsigned int nVars = 0,nCons = 0;
+ unsigned int temp1 = 0,temp2 = 0, iret = 0;
+ int x0_rows, x0_cols;
+ double *intcon = NULL,*options=NULL, *ifval=NULL;
+ int intconSize;
+
+ // Output arguments
+ double *fX = NULL, ObjVal=0,iteration=0,cpuTime=0,fobj_eval=0;
+ double dual_inf, constr_viol, complementarity, kkt_error;
+ int rstatus = 0;
+ int int_fobj_eval, int_constr_eval, int_fobj_grad_eval, int_constr_jac_eval, int_hess_eval;
+
+ //x0(starting point) matrix from scilab
+ if(getDoubleMatrixFromScilab(4, &x0_rows, &x0_cols, &x0ptr))
+ {
+ return 1;
+ }
+
+ nVars=x0_rows;
+
+ // Getting intcon
+ if (getDoubleMatrixFromScilab(5,&intconSize,&temp2,&intcon))
+ {
+ return 1;
+ }
+
+ temp1 = 1;
+ temp2 = 1;
+
+ //Getting parameters
+ if (getFixedSizeDoubleMatrixInList(6,2,temp1,temp2,&integertolerance))
+ {
+ return 1;
+ }
+ if (getFixedSizeDoubleMatrixInList(6,4,temp1,temp2,&maxnodes))
+ {
+ return 1;
+ }
+ if (getFixedSizeDoubleMatrixInList(6,6,temp1,temp2,&cputime))
+ {
+ return 1;
+ }
+ if (getFixedSizeDoubleMatrixInList(6,8,temp1,temp2,&allowablegap))
+ {
+ return 1;
+ }
+ if (getFixedSizeDoubleMatrixInList(6,10,temp1,temp2,&max_iter))
+ {
+ return 1;
+ }
+
+ SmartPtr<minuncTMINLP> tminlp = new minuncTMINLP(nVars, x0ptr, intconSize, intcon);
+
+ BonminSetup bonmin;
+ bonmin.initializeOptionsAndJournalist();
+
+ // Here we can change the default value of some Bonmin or Ipopt option
+ bonmin.options()->SetStringValue("mu_oracle","loqo");
+ bonmin.options()->SetNumericValue("bonmin.integer_tolerance", *integertolerance);
+ bonmin.options()->SetIntegerValue("bonmin.node_limit", (int)*maxnodes);
+ bonmin.options()->SetNumericValue("bonmin.time_limit", *cputime);
+ bonmin.options()->SetNumericValue("bonmin.allowable_gap", *allowablegap);
+ bonmin.options()->SetIntegerValue("bonmin.iteration_limit", (int)*max_iter);
+
+ //Now initialize from tminlp
+ bonmin.initialize(GetRawPtr(tminlp));
+
+ //Set up done, now let's branch and bound
+ try {
+ Bab bb;
+	bb(bonmin); // run branch and bound with Cbc; Ipopt solves the NLP relaxations
+ }
+ catch(TNLPSolver::UnsolvedError *E) {
+ //There has been a failure to solve a problem with Ipopt.
+ Scierror(999, "\nIpopt has failed to solve the problem!\n");
+ }
+ catch(OsiTMINLPInterface::SimpleError &E) {
+ Scierror(999, "\nFailed to solve a problem!\n");
+ }
+ catch(CoinError &E) {
+ Scierror(999, "\nFailed to solve a problem!\n");
+ }
+ rstatus=tminlp->returnStatus();
+ if(rstatus==0 ||rstatus== 3)
+ {
+ fX = tminlp->getX();
+ ObjVal = tminlp->getObjVal();
+ if (returnDoubleMatrixToScilab(1, nVars, 1, fX))
+ {
+ return 1;
+ }
+
+ if (returnDoubleMatrixToScilab(2, 1, 1, &ObjVal))
+ {
+ return 1;
+ }
+
+ if (returnIntegerMatrixToScilab(3, 1, 1, &rstatus))
+ {
+ return 1;
+ }
+
+ }
+ else
+ {
+ if (returnDoubleMatrixToScilab(1, 0, 0, fX))
+ {
+ return 1;
+ }
+
+ if (returnDoubleMatrixToScilab(2, 1, 1, &ObjVal))
+ {
+ return 1;
+ }
+
+ if (returnIntegerMatrixToScilab(3, 1, 1, &rstatus))
+ {
+ return 1;
+ }
+ }
+
+ return 0;
+ }
+}
+
diff --git a/build/cpp/minbndTMINLP.hpp b/build/cpp/minbndTMINLP.hpp
new file mode 100644
index 0000000..7c9070b
--- /dev/null
+++ b/build/cpp/minbndTMINLP.hpp
@@ -0,0 +1,114 @@
+// Copyright (C) 2016 - IIT Bombay - FOSSEE
+//
+// This file must be used under the terms of the CeCILL.
+// This source file is licensed as described in the file COPYING, which
+// you should have received as part of this distribution. The terms
+// are also available at
+// http://www.cecill.info/licences/Licence_CeCILL_V2-en.txt
+// Author: Harpreet Singh, Pranav Deshpande and Akshay Miterani
+// Organization: FOSSEE, IIT Bombay
+// Email: toolbox@scilab.in
+
+#ifndef minbndTMINLP_HPP
+#define minbndTMINLP_HPP
+
+#include "BonTMINLP.hpp"
+#include "IpTNLP.hpp"
+#include "call_scilab.h"
+
+using namespace Ipopt;
+using namespace Bonmin;
+
+class minbndTMINLP : public TMINLP
+{
+ private:
+
+ Index numVars_; //Number of input variables
+
+ Index intconSize_;
+
+ Number *lb_= NULL; //lb_ is a pointer to a matrix of size of 1*numVars_ with lower bound of all variables.
+
+ Number *ub_= NULL; //ub_ is a pointer to a matrix of size of 1*numVars_ with upper bound of all variables.
+
+ Number *finalX_= NULL; //finalX_ is a pointer to a matrix of size of 1*numVars_ with final value for the primal variables.
+
+ Number finalObjVal_; //finalObjVal_ is a scalar with the final value of the objective.
+
+ Number *intcon_ = NULL;
+
+ int status_; //Solver return status
+ minbndTMINLP(const minbndTMINLP&);
+ minbndTMINLP& operator=(const minbndTMINLP&);
+
+public:
+ // Constructor
+ minbndTMINLP(Index nV, Number *lb, Number *ub, Index intconSize, Number *intcon):numVars_(nV),lb_(lb),ub_(ub),intconSize_(intconSize),intcon_(intcon),finalX_(0),finalObjVal_(1e20){ }
+
+ /** default destructor */
+ virtual ~minbndTMINLP();
+
+ virtual bool get_variables_types(Index n, VariableType* var_types);
+
+ virtual bool get_variables_linearity(Index n, Ipopt::TNLP::LinearityType* var_types);
+
+ virtual bool get_constraints_linearity(Index m, Ipopt::TNLP::LinearityType* const_types);
+
+ /** Method to return some info about the nlp */
+ virtual bool get_nlp_info(Index& n, Index& m, Index& nnz_jac_g,
+ Index& nnz_h_lag, TNLP::IndexStyleEnum& index_style);
+
+ /** Method to return the bounds for my problem */
+ virtual bool get_bounds_info(Index n, Number* x_l, Number* x_u,
+ Index m, Number* g_l, Number* g_u);
+
+ /** Method to return the starting point for the algorithm */
+ virtual bool get_starting_point(Index n, bool init_x, Number* x,
+ bool init_z, Number* z_L, Number* z_U,
+ Index m, bool init_lambda,
+ Number* lambda);
+
+ /** Method to return the objective value */
+ virtual bool eval_f(Index n, const Number* x, bool new_x, Number& obj_value);
+
+ /** Method to return the gradient of the objective */
+ virtual bool eval_grad_f(Index n, const Number* x, bool new_x, Number* grad_f);
+
+ /** Method to return the constraint residuals */
+ virtual bool eval_g(Index n, const Number* x, bool new_x, Index m, Number* g);
+
+ /** Method to return:
+ * 1) The structure of the jacobian (if "values" is NULL)
+ * 2) The values of the jacobian (if "values" is not NULL)
+ */
+ virtual bool eval_jac_g(Index n, const Number* x, bool new_x,Index m, Index nele_jac, Index* iRow, Index *jCol,Number* values);
+
+ /** Method to return:
+ * 1) The structure of the hessian of the lagrangian (if "values" is NULL)
+ * 2) The values of the hessian of the lagrangian (if "values" is not NULL)
+ */
+ virtual bool eval_h(Index n, const Number* x, bool new_x,Number obj_factor, Index m, const Number* lambda,bool new_lambda, Index nele_hess, Index* iRow,Index* jCol, Number* values);
+
+ /** This method is called when the algorithm is complete so the TNLP can store/write the solution */
+ virtual void finalize_solution(SolverReturn status,Index n, const Number* x, Number obj_value);
+
+ virtual const SosInfo * sosConstraints() const{return NULL;}
+ virtual const BranchingInfo* branchingInfo() const{return NULL;}
+
+ const double * getX(); //Returns a pointer to a matrix of size of 1*numVars_
+ //with final value for the primal variables.
+
+ const double * getGrad(); //Returns a pointer to a matrix of size of 1*numVars_
+ //with final value of gradient for the primal variables.
+
+ const double * getHess(); //Returns a pointer to a matrix of size of numVars_*numVars_
+ //with final value of hessian for the primal variables.
+
+ double getObjVal(); //Returns the output of the final value of the objective.
+
+ double iterCount(); //Returns the iteration count
+
+	int returnStatus(); //Returns the solver return status
+};
+
+#endif
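
After Bonmin has finished, the gateways read the result back through getX(), getObjVal() and returnStatus(). A short sketch (not part of this commit) of that consumption step; as the implementation later in this diff shows, finalX_ is allocated in finalize_solution and freed by the destructor, so the caller has to copy it while the problem object is still alive:

    // Sketch: copying results out of a solved minbndTMINLP before it is destroyed.
    #include "minbndTMINLP.hpp"

    void copyResults(const Ipopt::SmartPtr<minbndTMINLP> &problem,
                     int nVars, double *xOut, double *fOut, int *statusOut)
    {
        *statusOut = problem->returnStatus();       // SolverReturn code stored by finalize_solution
        if (*statusOut == 0 || *statusOut == 3)     // the gateways treat these two codes as "solution available"
        {
            const double *x = problem->getX();      // points into finalX_, owned by the problem object
            for (int i = 0; i < nVars; ++i)
                xOut[i] = x[i];
            *fOut = problem->getObjVal();
        }
    }
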
diff --git a/build/cpp/minconTMINLP.hpp b/build/cpp/minconTMINLP.hpp
new file mode 100644
index 0000000..5b3006a
--- /dev/null
+++ b/build/cpp/minconTMINLP.hpp
@@ -0,0 +1,124 @@
+// Copyright (C) 2016 - IIT Bombay - FOSSEE
+//
+// This file must be used under the terms of the CeCILL.
+// This source file is licensed as described in the file COPYING, which
+// you should have received as part of this distribution. The terms
+// are also available at
+// http://www.cecill.info/licences/Licence_CeCILL_V2-en.txt
+// Author: Harpreet Singh, Pranav Deshpande and Akshay Miterani
+// Organization: FOSSEE, IIT Bombay
+// Email: toolbox@scilab.in
+
+#ifndef minconTMINLP_HPP
+#define minconTMINLP_HPP
+
+#include "BonTMINLP.hpp"
+#include "IpTNLP.hpp"
+#include "call_scilab.h"
+
+using namespace Ipopt;
+using namespace Bonmin;
+
+class minconTMINLP : public TMINLP
+{
+ private:
+
+ Index numVars_; //Number of variables
+
+ Index numCons_; //Number of constraints
+
+ Index numLC_; //Number of Linear constraints
+
+ Index intconSize_;
+
+	Number *x0_= NULL;	//x0_ is a pointer to a matrix of size of 1*numVars_ with the starting point for all variables.
+
+ Number *lb_= NULL; //lb_ is a pointer to a matrix of size of 1*numVars_ with lower bound of all variables.
+
+ Number *ub_= NULL; //ub_ is a pointer to a matrix of size of 1*numVars_ with upper bound of all variables.
+
+ Number *conLb_= NULL; //conLb_ is a pointer to a matrix of size of numCon_*1 with lower bound of all constraints.
+
+ Number *conUb_= NULL; //conUb_ is a pointer to a matrix of size of numCon_*1 with upper bound of all constraints.
+
+ Number *finalX_= NULL; //finalX_ is a pointer to a matrix of size of 1*numVars_ with final value for the primal variables.
+
+ Number finalObjVal_; //finalObjVal_ is a scalar with the final value of the objective.
+
+ Number *intcon_ = NULL;
+
+ int status_; //Solver return status
+ minconTMINLP(const minconTMINLP&);
+ minconTMINLP& operator=(const minconTMINLP&);
+
+public:
+ // Constructor
+ minconTMINLP(Index nV, Number *x0, Number *lb, Number *ub, Index nLC, Index nCons, Number *conlb, Number *conub, Index intconSize, Number *intcon):numVars_(nV),x0_(x0),lb_(lb),ub_(ub),numLC_(nLC),numCons_(nCons),conLb_(conlb),conUb_(conub),intconSize_(intconSize),intcon_(intcon),finalX_(0),finalObjVal_(1e20){ }
+
+ /** default destructor */
+ virtual ~minconTMINLP();
+
+ virtual bool get_variables_types(Index n, VariableType* var_types);
+
+ virtual bool get_variables_linearity(Index n, Ipopt::TNLP::LinearityType* var_types);
+
+ virtual bool get_constraints_linearity(Index m, Ipopt::TNLP::LinearityType* const_types);
+
+ /** Method to return some info about the nlp */
+ virtual bool get_nlp_info(Index& n, Index& m, Index& nnz_jac_g,
+ Index& nnz_h_lag, TNLP::IndexStyleEnum& index_style);
+
+ /** Method to return the bounds for my problem */
+ virtual bool get_bounds_info(Index n, Number* x_l, Number* x_u,
+ Index m, Number* g_l, Number* g_u);
+
+ /** Method to return the starting point for the algorithm */
+ virtual bool get_starting_point(Index n, bool init_x, Number* x,
+ bool init_z, Number* z_L, Number* z_U,
+ Index m, bool init_lambda,
+ Number* lambda);
+
+ /** Method to return the objective value */
+ virtual bool eval_f(Index n, const Number* x, bool new_x, Number& obj_value);
+
+ /** Method to return the gradient of the objective */
+ virtual bool eval_grad_f(Index n, const Number* x, bool new_x, Number* grad_f);
+
+ /** Method to return the constraint residuals */
+ virtual bool eval_g(Index n, const Number* x, bool new_x, Index m, Number* g);
+
+ /** Method to return:
+ * 1) The structure of the jacobian (if "values" is NULL)
+ * 2) The values of the jacobian (if "values" is not NULL)
+ */
+ virtual bool eval_jac_g(Index n, const Number* x, bool new_x,Index m, Index nele_jac, Index* iRow, Index *jCol,Number* values);
+
+ /** Method to return:
+ * 1) The structure of the hessian of the lagrangian (if "values" is NULL)
+ * 2) The values of the hessian of the lagrangian (if "values" is not NULL)
+ */
+ virtual bool eval_h(Index n, const Number* x, bool new_x,Number obj_factor, Index m, const Number* lambda,bool new_lambda, Index nele_hess, Index* iRow,Index* jCol, Number* values);
+
+ /** This method is called when the algorithm is complete so the TNLP can store/write the solution */
+ virtual void finalize_solution(SolverReturn status,Index n, const Number* x, Number obj_value);
+
+ virtual const SosInfo * sosConstraints() const{return NULL;}
+ virtual const BranchingInfo* branchingInfo() const{return NULL;}
+
+ const double * getX(); //Returns a pointer to a matrix of size of 1*numVars_
+ //with final value for the primal variables.
+
+ const double * getGrad(); //Returns a pointer to a matrix of size of 1*numVars_
+ //with final value of gradient for the primal variables.
+
+ const double * getHess(); //Returns a pointer to a matrix of size of numVars_*numVars_
+ //with final value of hessian for the primal variables.
+
+ double getObjVal(); //Returns the output of the final value of the objective.
+
+ double iterCount(); //Returns the iteration count
+
+	int returnStatus(); //Returns the solver return status
+};
+
+#endif
diff --git a/build/cpp/minuncTMINLP.hpp b/build/cpp/minuncTMINLP.hpp
new file mode 100644
index 0000000..2b6e954
--- /dev/null
+++ b/build/cpp/minuncTMINLP.hpp
@@ -0,0 +1,113 @@
+// Copyright (C) 2016 - IIT Bombay - FOSSEE
+//
+// This file must be used under the terms of the CeCILL.
+// This source file is licensed as described in the file COPYING, which
+// you should have received as part of this distribution. The terms
+// are also available at
+// http://www.cecill.info/licences/Licence_CeCILL_V2-en.txt
+// Author: Harpreet Singh, Pranav Deshpande and Akshay Miterani
+// Organization: FOSSEE, IIT Bombay
+// Email: toolbox@scilab.in
+
+#define __USE_DEPRECATED_STACK_FUNCTIONS__
+#ifndef minuncTMINLP_HPP
+#define minuncTMINLP_HPP
+
+#include "BonTMINLP.hpp"
+#include "IpTNLP.hpp"
+#include "call_scilab.h"
+
+using namespace Ipopt;
+using namespace Bonmin;
+
+class minuncTMINLP : public TMINLP
+{
+ private:
+
+ Index numVars_; //Number of input variables
+
+ Index intconSize_;
+
+ const Number *varGuess_= NULL; //varGuess_ is a pointer to a matrix of size of 1*numVars_ with initial guess of all variables.
+
+ Number *finalX_= NULL; //finalX_ is a pointer to a matrix of size of 1*numVars_ with final value for the primal variables.
+
+ Number finalObjVal_; //finalObjVal_ is a scalar with the final value of the objective.
+
+ Number *intcon_ = NULL;
+
+ int status_; //Solver return status
+ minuncTMINLP(const minuncTMINLP&);
+ minuncTMINLP& operator=(const minuncTMINLP&);
+
+public:
+ // Constructor
+ minuncTMINLP(Index nV, Number *x0, Index intconSize, Number *intcon):numVars_(nV),varGuess_(x0),intconSize_(intconSize),intcon_(intcon),finalX_(0),finalObjVal_(1e20){ }
+
+ /** default destructor */
+ virtual ~minuncTMINLP();
+
+ virtual bool get_variables_types(Index n, VariableType* var_types);
+
+ virtual bool get_variables_linearity(Index n, Ipopt::TNLP::LinearityType* var_types);
+
+ virtual bool get_constraints_linearity(Index m, Ipopt::TNLP::LinearityType* const_types);
+
+ /** Method to return some info about the nlp */
+ virtual bool get_nlp_info(Index& n, Index& m, Index& nnz_jac_g,
+ Index& nnz_h_lag, TNLP::IndexStyleEnum& index_style);
+
+ /** Method to return the bounds for my problem */
+ virtual bool get_bounds_info(Index n, Number* x_l, Number* x_u,
+ Index m, Number* g_l, Number* g_u);
+
+ /** Method to return the starting point for the algorithm */
+ virtual bool get_starting_point(Index n, bool init_x, Number* x,
+ bool init_z, Number* z_L, Number* z_U,
+ Index m, bool init_lambda,
+ Number* lambda);
+
+ /** Method to return the objective value */
+ virtual bool eval_f(Index n, const Number* x, bool new_x, Number& obj_value);
+
+ /** Method to return the gradient of the objective */
+ virtual bool eval_grad_f(Index n, const Number* x, bool new_x, Number* grad_f);
+
+ /** Method to return the constraint residuals */
+ virtual bool eval_g(Index n, const Number* x, bool new_x, Index m, Number* g);
+
+ /** Method to return:
+ * 1) The structure of the jacobian (if "values" is NULL)
+ * 2) The values of the jacobian (if "values" is not NULL)
+ */
+ virtual bool eval_jac_g(Index n, const Number* x, bool new_x,Index m, Index nele_jac, Index* iRow, Index *jCol,Number* values);
+
+ /** Method to return:
+ * 1) The structure of the hessian of the lagrangian (if "values" is NULL)
+ * 2) The values of the hessian of the lagrangian (if "values" is not NULL)
+ */
+ virtual bool eval_h(Index n, const Number* x, bool new_x,Number obj_factor, Index m, const Number* lambda,bool new_lambda, Index nele_hess, Index* iRow,Index* jCol, Number* values);
+
+ /** This method is called when the algorithm is complete so the TNLP can store/write the solution */
+ virtual void finalize_solution(SolverReturn status,Index n, const Number* x, Number obj_value);
+
+ virtual const SosInfo * sosConstraints() const{return NULL;}
+ virtual const BranchingInfo* branchingInfo() const{return NULL;}
+
+ const double * getX(); //Returns a pointer to a matrix of size of 1*numVars_
+ //with final value for the primal variables.
+
+ const double * getGrad(); //Returns a pointer to a matrix of size of 1*numVars_
+ //with final value of gradient for the primal variables.
+
+ const double * getHess(); //Returns a pointer to a matrix of size of numVars_*numVars_
+ //with final value of hessian for the primal variables.
+
+ double getObjVal(); //Returns the output of the final value of the objective.
+
+ double iterCount(); //Returns the iteration count
+
+	int returnStatus(); //Returns the solver return status
+};
+
+#endif
diff --git a/build/cpp/sci_iofunc.cpp b/build/cpp/sci_iofunc.cpp
new file mode 100644
index 0000000..259f7c3
--- /dev/null
+++ b/build/cpp/sci_iofunc.cpp
@@ -0,0 +1,334 @@
+// Symphony Toolbox for Scilab
+// (Definition of) Functions for input and output from Scilab
+// By Keyur Joshi
+
+#include "api_scilab.h"
+#include "Scierror.h"
+#include "sciprint.h"
+#include "BOOL.h"
+#include <localization.h>
+#include "call_scilab.h"
+#include <string.h>
+
+
+using namespace std;
+
+int getDoubleFromScilab(int argNum, double *dest)
+{
+ //data declarations
+ SciErr sciErr;
+ int iRet,*varAddress;
+ const char errMsg[]="Wrong type for input argument #%d: A double is expected.\n";
+ const int errNum=999;
+ //get variable address
+ sciErr = getVarAddressFromPosition(pvApiCtx, argNum, &varAddress);
+ if (sciErr.iErr)
+ {
+ printError(&sciErr, 0);
+ return 1;
+ }
+ //check that it is a non-complex double
+ if ( !isDoubleType(pvApiCtx,varAddress) || isVarComplex(pvApiCtx,varAddress) )
+ {
+ Scierror(errNum,errMsg,argNum);
+ return 1;
+ }
+ //retrieve and store
+ iRet = getScalarDouble(pvApiCtx, varAddress, dest);
+ if(iRet)
+ {
+ Scierror(errNum,errMsg,argNum);
+ return 1;
+ }
+ return 0;
+}
+
+int getUIntFromScilab(int argNum, int *dest)
+{
+ SciErr sciErr;
+ int iRet,*varAddress;
+ double inputDouble;
+ const char errMsg[]="Wrong type for input argument #%d: A nonnegative integer is expected.\n";
+ const int errNum=999;
+ //same steps as above
+ sciErr = getVarAddressFromPosition(pvApiCtx, argNum, &varAddress);
+ if (sciErr.iErr)
+ {
+ printError(&sciErr, 0);
+ return 1;
+ }
+ if ( !isDoubleType(pvApiCtx,varAddress) || isVarComplex(pvApiCtx,varAddress) )
+ {
+ Scierror(errNum,errMsg,argNum);
+ return 1;
+ }
+ iRet = getScalarDouble(pvApiCtx, varAddress, &inputDouble);
+ //check that an unsigned int is stored in the double by casting and recasting
+ if(iRet || ((double)((unsigned int)inputDouble))!=inputDouble)
+ {
+ Scierror(errNum,errMsg,argNum);
+ return 1;
+ }
+ *dest=(unsigned int)inputDouble;
+ return 0;
+}
+
+int getIntFromScilab(int argNum, int *dest)
+{
+ SciErr sciErr;
+ int iRet,*varAddress;
+ double inputDouble;
+ const char errMsg[]="Wrong type for input argument #%d: An integer is expected.\n";
+ const int errNum=999;
+ //same steps as above
+ sciErr = getVarAddressFromPosition(pvApiCtx, argNum, &varAddress);
+ if (sciErr.iErr)
+ {
+ printError(&sciErr, 0);
+ return 1;
+ }
+ if ( !isDoubleType(pvApiCtx,varAddress) || isVarComplex(pvApiCtx,varAddress) )
+ {
+ Scierror(errNum,errMsg,argNum);
+ return 1;
+ }
+ iRet = getScalarDouble(pvApiCtx, varAddress, &inputDouble);
+ //check that an int is stored in the double by casting and recasting
+ if(iRet || ((double)((int)inputDouble))!=inputDouble)
+ {
+ Scierror(errNum,errMsg,argNum);
+ return 1;
+ }
+ *dest=(int)inputDouble;
+ return 0;
+}
+
+int getFixedSizeDoubleMatrixFromScilab(int argNum, int rows, int cols, double **dest)
+{
+ int *varAddress,inputMatrixRows,inputMatrixCols;
+ SciErr sciErr;
+ const char errMsg[]="Wrong type for input argument #%d: A matrix of double of size %d by %d is expected.\n";
+ const int errNum=999;
+ //same steps as above
+ sciErr = getVarAddressFromPosition(pvApiCtx, argNum, &varAddress);
+ if (sciErr.iErr)
+ {
+ printError(&sciErr, 0);
+ return 1;
+ }
+ if ( !isDoubleType(pvApiCtx,varAddress) || isVarComplex(pvApiCtx,varAddress) )
+ {
+ Scierror(errNum,errMsg,argNum,rows,cols);
+ return 1;
+ }
+ sciErr = getMatrixOfDouble(pvApiCtx, varAddress, &inputMatrixRows, &inputMatrixCols,NULL);
+ if (sciErr.iErr)
+ {
+ printError(&sciErr, 0);
+ return 1;
+ }
+ //check that the matrix has the correct number of rows and columns
+ if(inputMatrixRows!=rows || inputMatrixCols!=cols)
+ {
+ Scierror(errNum,errMsg,argNum,rows,cols);
+ return 1;
+ }
+ getMatrixOfDouble(pvApiCtx, varAddress, &inputMatrixRows, &inputMatrixCols, dest);
+ return 0;
+}
+
+int getDoubleMatrixFromScilab(int argNum, int *rows, int *cols, double **dest)
+{
+ int *varAddress;
+ SciErr sciErr;
+ const char errMsg[]="Wrong type for input argument #%d: A matrix of double is expected.\n";
+ const int errNum=999;
+ //same steps as above
+ sciErr = getVarAddressFromPosition(pvApiCtx, argNum, &varAddress);
+ if (sciErr.iErr)
+ {
+ printError(&sciErr, 0);
+ return 1;
+ }
+ if ( !isDoubleType(pvApiCtx,varAddress) || isVarComplex(pvApiCtx,varAddress) )
+ {
+ Scierror(errNum,errMsg,argNum);
+ return 1;
+ }
+	sciErr = getMatrixOfDouble(pvApiCtx, varAddress, rows, cols, dest);
+ if (sciErr.iErr)
+ {
+ printError(&sciErr, 0);
+ return 1;
+ }
+ return 0;
+}
+
+int getFixedSizeDoubleMatrixInList(int argNum, int itemPos, int rows, int cols, double **dest)
+{
+ int *varAddress,inputMatrixRows,inputMatrixCols;
+ SciErr sciErr;
+ const char errMsg[]="Wrong type for input argument #%d: A matrix of double of size %d by %d is expected.\n";
+ const int errNum=999;
+ //same steps as above
+ sciErr = getVarAddressFromPosition(pvApiCtx, argNum, &varAddress);
+ if (sciErr.iErr)
+ {
+ printError(&sciErr, 0);
+ return 1;
+ }
+
+	sciErr = getMatrixOfDoubleInList(pvApiCtx, varAddress, itemPos, &rows, &cols, dest);
+ if (sciErr.iErr)
+ {
+ printError(&sciErr, 0);
+ return 1;
+ }
+ return 0;
+}
+
+int getStringFromScilab(int argNum,char **dest)
+{
+ int *varAddress,inputMatrixRows,inputMatrixCols;
+ SciErr sciErr;
+ sciErr = getVarAddressFromPosition(pvApiCtx, argNum, &varAddress);
+
+ //check whether there is an error or not.
+ if (sciErr.iErr)
+ {
+ printError(&sciErr, 0);
+ return 1;
+ }
+ if ( !isStringType(pvApiCtx,varAddress) )
+ {
+ Scierror(999,"Wrong type for input argument 1: A file name is expected.\n");
+ return 1;
+ }
+ //read the value in that pointer pointing to file name
+	getAllocatedSingleString(pvApiCtx, varAddress, dest);
+
+	return 0;
+}
+
+bool getFunctionFromScilab(int n, char name[], const double *x, int posFirstElementOnStackForSF, int nOfRhsOnSF, int nOfLhsOnSF, double **dest)
+{
+ double check;
+ createMatrixOfDouble(pvApiCtx, posFirstElementOnStackForSF, 1, n, x);
+ C2F(scistring)(&posFirstElementOnStackForSF,name,&nOfLhsOnSF,&nOfRhsOnSF,(unsigned long)strlen(name));
+
+ if(getDoubleFromScilab(posFirstElementOnStackForSF+1,&check))
+ {
+ return true;
+ }
+ if (check==1)
+ {
+ return true;
+ }
+ else
+ {
+ int x_rows, x_cols;
+ if(getDoubleMatrixFromScilab(posFirstElementOnStackForSF, &x_rows, &x_cols, dest))
+ {
+ sciprint("No results ");
+ return true;
+
+ }
+ }
+ return 0;
+}
+
+bool getHessFromScilab(int n, int numConstr_, char name[], const double *x, double *obj, const double *lambda, int posFirstElementOnStackForSF, int nOfRhsOnSF, int nOfLhsOnSF, double **dest)
+{
+ double check;
+ createMatrixOfDouble(pvApiCtx, posFirstElementOnStackForSF, 1, n, x);
+ createMatrixOfDouble(pvApiCtx, posFirstElementOnStackForSF+1, 1, 1, obj);
+ createMatrixOfDouble(pvApiCtx, posFirstElementOnStackForSF+2, 1, numConstr_, lambda);
+ C2F(scistring)(&posFirstElementOnStackForSF,name,&nOfLhsOnSF,&nOfRhsOnSF,(unsigned long)strlen(name));
+
+ if(getDoubleFromScilab(posFirstElementOnStackForSF+1,&check))
+ {
+ return true;
+ }
+ if (check==1)
+ {
+ return true;
+ }
+ else
+ {
+ int x_rows, x_cols;
+ if(getDoubleMatrixFromScilab(posFirstElementOnStackForSF, &x_rows, &x_cols, dest))
+ {
+ sciprint("No results ");
+ return 1;
+
+ }
+ }
+ return 0;
+}
+
+int return0toScilab()
+{
+ int iRet;
+ //create variable in scilab
+ iRet = createScalarDouble(pvApiCtx, nbInputArgument(pvApiCtx)+1,0);
+ if(iRet)
+ {
+ /* If error, no return variable */
+ AssignOutputVariable(pvApiCtx, 1) = 0;
+ return 1;
+ }
+ //make it the output variable
+ AssignOutputVariable(pvApiCtx, 1) = nbInputArgument(pvApiCtx)+1;
+ //return it to scilab
+ //ReturnArguments(pvApiCtx);
+ return 0;
+}
+
+int returnDoubleToScilab(double retVal)
+{
+ int iRet;
+ //same steps as above
+ iRet = createScalarDouble(pvApiCtx, nbInputArgument(pvApiCtx)+1,retVal);
+ if(iRet)
+ {
+ /* If error, no return variable */
+ AssignOutputVariable(pvApiCtx, 1) = 0;
+ return 1;
+ }
+ AssignOutputVariable(pvApiCtx, 1) = nbInputArgument(pvApiCtx)+1;
+ //ReturnArguments(pvApiCtx);
+ return 0;
+}
+
+int returnDoubleMatrixToScilab(int itemPos, int rows, int cols, double *dest)
+{
+ SciErr sciErr;
+ //same steps as above
+ sciErr = createMatrixOfDouble(pvApiCtx, nbInputArgument(pvApiCtx) + itemPos, rows, cols, dest);
+ if (sciErr.iErr)
+ {
+ printError(&sciErr, 0);
+ return 1;
+ }
+
+ AssignOutputVariable(pvApiCtx, itemPos) = nbInputArgument(pvApiCtx)+itemPos;
+
+ return 0;
+}
+
+int returnIntegerMatrixToScilab(int itemPos, int rows, int cols, int *dest)
+{
+ SciErr sciErr;
+ //same steps as above
+ sciErr = createMatrixOfInteger32(pvApiCtx, nbInputArgument(pvApiCtx) + itemPos, rows, cols, dest);
+ if (sciErr.iErr)
+ {
+ printError(&sciErr, 0);
+ return 1;
+ }
+
+ AssignOutputVariable(pvApiCtx, itemPos) = nbInputArgument(pvApiCtx)+itemPos;
+
+ return 0;
+}
+
+
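
The helpers above come in two halves: the get* functions fetch an argument from the Scilab stack and validate it (reporting through Scierror/printError and returning 1 on failure), while the return* functions create the result at stack position nbInputArgument + itemPos and register it as output itemPos. A minimal hypothetical gateway (not part of this commit) showing the two halves used together, with the same include block as the gateways earlier in this diff:

    // Hypothetical demo gateway: read a real matrix from input 1, return its first
    // element as output 1 and echo the matrix back as output 2. Like the gateways
    // above, it leaves ReturnArguments to the toolbox's calling convention.
    #include "sci_iofunc.hpp"
    extern "C"
    {
    #include "call_scilab.h"
    #include <api_scilab.h>
    #include <Scierror.h>

    int sci_iofunc_demo(char *fname)
    {
        int rows = 0, cols = 0;
        double *mat = NULL;

        CheckInputArgument(pvApiCtx, 1, 1);
        CheckOutputArgument(pvApiCtx, 2, 2);

        if (getDoubleMatrixFromScilab(1, &rows, &cols, &mat))
            return 1;                                        // error already reported by the helper
        if (rows * cols == 0)
        {
            Scierror(999, "%s: input matrix is empty.\n", fname);
            return 1;
        }

        if (returnDoubleToScilab(mat[0]))                    // output 1: a scalar
            return 1;
        if (returnDoubleMatrixToScilab(2, rows, cols, mat))  // output 2: the matrix itself
            return 1;

        return 0;
    }
    }
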
diff --git a/build/cpp/sci_iofunc.hpp b/build/cpp/sci_iofunc.hpp
new file mode 100644
index 0000000..7e18951
--- /dev/null
+++ b/build/cpp/sci_iofunc.hpp
@@ -0,0 +1,25 @@
+// Symphony Toolbox for Scilab
+// (Declaration of) Functions for input and output from Scilab
+// By Keyur Joshi
+
+#ifndef SCI_IOFUNCHEADER
+#define SCI_IOFUNCHEADER
+
+//input
+int getDoubleFromScilab(int argNum, double *dest);
+int getUIntFromScilab(int argNum, int *dest);
+int getIntFromScilab(int argNum, int *dest);
+int getFixedSizeDoubleMatrixFromScilab(int argNum, int rows, int cols, double **dest);
+int getDoubleMatrixFromScilab(int argNum, int *rows, int *cols, double **dest);
+int getFixedSizeDoubleMatrixInList(int argNum, int itemPos, int rows, int cols, double **dest);
+int getStringFromScilab(int argNum,char** dest);
+bool getFunctionFromScilab(int n, char name[], const double *x, int posFirstElementOnStackForSF, int nOfRhsOnSF, int nOfLhsOnSF, double **dest);
+bool getHessFromScilab(int n, int numConstr_, char name[], const double *x, double *obj, const double *lambda, int posFirstElementOnStackForSF, int nOfRhsOnSF, int nOfLhsOnSF, double **dest);
+
+//output
+int return0toScilab();
+int returnDoubleToScilab(double retVal);
+int returnDoubleMatrixToScilab(int itemPos, int rows, int cols, double *dest);
+int returnIntegerMatrixToScilab(int itemPos, int rows, int cols, int *dest);
+
+#endif //SCI_IOFUNCHEADER
diff --git a/build/cpp/sci_minbndTMINLP.cpp b/build/cpp/sci_minbndTMINLP.cpp
new file mode 100644
index 0000000..405c4c3
--- /dev/null
+++ b/build/cpp/sci_minbndTMINLP.cpp
@@ -0,0 +1,218 @@
+// Copyright (C) 2015 - IIT Bombay - FOSSEE
+//
+// Author: Harpreet Singh, Pranav Deshpande and Akshay Miterani
+// Organization: FOSSEE, IIT Bombay
+// Email: toolbox@scilab.in
+// This file must be used under the terms of the CeCILL.
+// This source file is licensed as described in the file COPYING, which
+// you should have received as part of this distribution. The terms
+// are also available at
+// http://www.cecill.info/licences/Licence_CeCILL_V2-en.txt
+
+#include "minbndTMINLP.hpp"
+#include "sci_iofunc.hpp"
+
+extern "C"
+{
+#include "call_scilab.h"
+#include <api_scilab.h>
+#include <Scierror.h>
+#include <BOOL.h>
+#include <localization.h>
+#include <sciprint.h>
+#include <string.h>
+#include <assert.h>
+}
+
+using namespace Ipopt;
+using namespace Bonmin;
+
+minbndTMINLP::~minbndTMINLP()
+{
+ free(finalX_);
+}
+
+// Set the type of every variable - CONTINUOUS or INTEGER
+bool minbndTMINLP::get_variables_types(Index n, VariableType* var_types)
+{
+ n = numVars_;
+ for(int i=0; i < n; i++)
+ var_types[i] = CONTINUOUS;
+ for(int i=0 ; i < intconSize_ ; ++i)
+ var_types[(int)(intcon_[i]-1)] = INTEGER;
+ return true;
+}
+
+// The linearity of the variables - LINEAR or NON_LINEAR
+bool minbndTMINLP::get_variables_linearity(Index n, Ipopt::TNLP::LinearityType* var_types)
+{ return true; }
+
+// The linearity of the constraints - LINEAR or NON_LINEAR
+bool minbndTMINLP::get_constraints_linearity(Index m, Ipopt::TNLP::LinearityType* const_types)
+{ return true;}
+
+//get NLP info such as number of variables,constraints,no.of elements in jacobian and hessian to allocate memory
+bool minbndTMINLP::get_nlp_info(Index& n, Index& m, Index& nnz_jac_g, Index& nnz_h_lag, TNLP::IndexStyleEnum& index_style)
+{
+ n=numVars_; // Number of variables
+ m=0; // Number of constraints
+ nnz_jac_g = 0; // No. of elements in Jacobian of constraints
+	nnz_h_lag = n*(n+1)/2; // No. of elements in the lower triangle of the Hessian of the Lagrangian.
+ index_style=TNLP::C_STYLE; // Index style of matrices
+ return true;
+}
+
+//get variable and constraint bound info
+bool minbndTMINLP::get_bounds_info(Index n, Number* x_l, Number* x_u, Index m, Number* g_l, Number* g_u)
+{
+ unsigned int i;
+ for(i=0;i<n;i++)
+ {
+ x_l[i]=lb_[i]+0.0000001;
+ x_u[i]=ub_[i]-0.0000001;
+ }
+
+ g_l=NULL;
+ g_u=NULL;
+ return true;
+}
+
+// return the value of the constraints: g(x)
+bool minbndTMINLP::eval_g(Index n, const Number* x, bool new_x, Index m, Number* g)
+{
+ // return the value of the constraints: g(x)
+ g=NULL;
+ return true;
+}
+
+// return the structure or values of the jacobian
+bool minbndTMINLP::eval_jac_g(Index n, const Number* x, bool new_x,Index m, Index nele_jac, Index* iRow, Index *jCol,Number* values)
+{
+ if (values == NULL)
+ {
+ // return the structure of the jacobian of the constraints
+ iRow=NULL;
+ jCol=NULL;
+ }
+ else
+ {
+ values=NULL;
+ }
+ return true;
+}
+
+//get value of objective function at vector x
+bool minbndTMINLP::eval_f(Index n, const Number* x, bool new_x, Number& obj_value)
+{
+ char name[20]="_f";
+ Number *obj;
+ if (getFunctionFromScilab(n,name,x, 7, 1,2,&obj))
+ {
+ return false;
+ }
+ obj_value = *obj;
+ return true;
+}
+
+//get value of gradient of objective function at vector x.
+bool minbndTMINLP::eval_grad_f(Index n, const Number* x, bool new_x, Number* grad_f)
+{
+ char name[20]="_gradf";
+ Number *resg;
+ if (getFunctionFromScilab(n,name,x, 7, 1,2,&resg))
+ {
+ return false;
+ }
+
+ Index i;
+ for(i=0;i<numVars_;i++)
+ {
+ grad_f[i]=resg[i];
+ }
+ return true;
+}
+
+// This method sets the initial values for the required vectors. For now we assume 0 for all values.
+bool minbndTMINLP::get_starting_point(Index n, bool init_x, Number* x,bool init_z, Number* z_L, Number* z_U,Index m, bool init_lambda,Number* lambda)
+{
+ assert(init_x == true);
+ assert(init_z == false);
+ assert(init_lambda == false);
+ if (init_x == true)
+ { //we need to set initial values for vector x
+ for (Index var=0;var<n;var++)
+ {x[var]=0.0;}//initialize with 0.
+ }
+ return true;
+}
+
+/*
+ * Return either the sparsity structure of the Hessian of the Lagrangian,
+ * or the values of the Hessian of the Lagrangian for the given values for
+ * x,lambda,obj_factor.
+*/
+
+bool minbndTMINLP::eval_h(Index n, const Number* x, bool new_x,Number obj_factor, Index m, const Number* lambda,bool new_lambda, Index nele_hess, Index* iRow,Index* jCol, Number* values)
+{
+ double check;
+ if (values==NULL)
+ {
+ Index idx=0;
+ for (Index row = 0; row < numVars_; row++)
+ {
+ for (Index col = 0; col <= row; col++)
+ { iRow[idx] = row;
+ jCol[idx] = col;
+ idx++;
+ }
+ }
+ }
+
+ else
+ { char name[20]="_gradhess";
+ Number *resh;
+ if (getFunctionFromScilab(n,name,x, 7, 1,2,&resh))
+ {
+ return false;
+ }
+ Index index=0;
+ for (Index row=0;row < numVars_ ;++row)
+ {
+ for (Index col=0; col <= row; ++col)
+ {
+ values[index++]=obj_factor*(resh[numVars_*row+col]);
+ }
+ }
+ }
+ return true;
+}
+
+void minbndTMINLP::finalize_solution(SolverReturn status,Index n, const Number* x, Number obj_value)
+{
+ finalObjVal_ = obj_value;
+ status_ = status;
+ if(status==0 ||status== 3)
+ {
+ finalX_ = (double*)malloc(sizeof(double) * numVars_ * 1);
+ for (Index i=0; i<numVars_; i++)
+ {
+ finalX_[i] = x[i];
+ }
+ }
+
+}
+
+const double * minbndTMINLP::getX()
+{
+ return finalX_;
+}
+
+double minbndTMINLP::getObjVal()
+{
+ return finalObjVal_;
+}
+
+int minbndTMINLP::returnStatus()
+{
+ return status_;
+}
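
In sci_minbndTMINLP.cpp above, get_nlp_info declares nnz_h_lag = n*(n+1)/2 and eval_h enumerates only the lower triangle (col <= row), so the (row, col) Hessian entry lands at flat index row*(row+1)/2 + col in the values array, in exactly the order the structure callback declared. A standalone check of that index arithmetic (plain C++, no Scilab or Ipopt needed):

    // Verifies the dense lower-triangle packing used by eval_h above.
    #include <cassert>

    int main()
    {
        const int n = 4;
        int idx = 0;
        for (int row = 0; row < n; ++row)
        {
            for (int col = 0; col <= row; ++col)
            {
                assert(idx == row * (row + 1) / 2 + col);  // closed form for the running index
                ++idx;
            }
        }
        assert(idx == n * (n + 1) / 2);                    // matches nnz_h_lag from get_nlp_info
        return 0;
    }
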
diff --git a/build/cpp/sci_minconTMINLP.cpp b/build/cpp/sci_minconTMINLP.cpp
new file mode 100644
index 0000000..ac688d4
--- /dev/null
+++ b/build/cpp/sci_minconTMINLP.cpp
@@ -0,0 +1,311 @@
+// Copyright (C) 2015 - IIT Bombay - FOSSEE
+//
+// Author: Harpreet Singh, Pranav Deshpande and Akshay Miterani
+// Organization: FOSSEE, IIT Bombay
+// Email: toolbox@scilab.in
+// This file must be used under the terms of the CeCILL.
+// This source file is licensed as described in the file COPYING, which
+// you should have received as part of this distribution. The terms
+// are also available at
+// http://www.cecill.info/licences/Licence_CeCILL_V2-en.txt
+
+#include "minconTMINLP.hpp"
+#include "sci_iofunc.hpp"
+
+extern "C"
+{
+#include "call_scilab.h"
+#include <api_scilab.h>
+#include <Scierror.h>
+#include <BOOL.h>
+#include <localization.h>
+#include <sciprint.h>
+#include <string.h>
+#include <assert.h>
+}
+
+using namespace Ipopt;
+using namespace Bonmin;
+
+//#define DEBUG // uncomment to enable the sciprint traces guarded by #ifdef DEBUG below
+
+minconTMINLP::~minconTMINLP()
+{
+ free(finalX_);
+}
+
+// Set the type of every variable - CONTINUOUS or INTEGER
+bool minconTMINLP::get_variables_types(Index n, VariableType* var_types)
+{
+ n = numVars_;
+ for(int i=0; i < n; i++)
+ var_types[i] = CONTINUOUS;
+ for(int i=0 ; i < intconSize_ ; ++i)
+ var_types[(int)(intcon_[i]-1)] = INTEGER;
+ return true;
+}
+
+// The linearity of the variables - LINEAR or NON_LINEAR
+bool minconTMINLP::get_variables_linearity(Index n, Ipopt::TNLP::LinearityType* var_types)
+{
+ for(int i=0;i<n;i++)
+ {
+ var_types[i] = Ipopt::TNLP::NON_LINEAR;
+ }
+ return true; }
+
+// The linearity of the constraints - LINEAR or NON_LINEAR
+bool minconTMINLP::get_constraints_linearity(Index m, Ipopt::TNLP::LinearityType* const_types)
+{
+ for(int i=0;i<numLC_;i++)
+ {
+ const_types[i] = Ipopt::TNLP::LINEAR;
+ }
+
+ for(int i=numLC_;i<m;i++)
+ {
+ const_types[i] = Ipopt::TNLP::NON_LINEAR;
+ }
+ return true;}
+
+//get NLP info such as number of variables,constraints,no.of elements in jacobian and hessian to allocate memory
+bool minconTMINLP::get_nlp_info(Index& n, Index& m, Index& nnz_jac_g, Index& nnz_h_lag, TNLP::IndexStyleEnum& index_style)
+{
+ n=numVars_; // Number of variables
+ m=numCons_; // Number of constraints
+ nnz_jac_g = n*m; // No. of elements in Jacobian of constraints
+	nnz_h_lag = n*(n+1)/2; // No. of elements in the lower triangle of the Hessian of the Lagrangian (eval_h fills only that triangle).
+ index_style=TNLP::C_STYLE; // Index style of matrices
+ return true;
+}
+
+//get variable and constraint bound info
+bool minconTMINLP::get_bounds_info(Index n, Number* x_l, Number* x_u, Index m, Number* g_l, Number* g_u)
+{
+ #ifdef DEBUG
+ sciprint("Code is in get_bounds_info\n");
+ #endif
+ unsigned int i;
+ for(i=0;i<n;i++)
+ {
+ x_l[i]=lb_[i];
+ x_u[i]=ub_[i];
+ }
+ for(i=0;i<m;i++)
+ {
+ g_l[i]=conLb_[i];
+ g_u[i]=conUb_[i];
+ }
+ return true;
+}
+
+// return the value of the constraints: g(x)
+bool minconTMINLP::eval_g(Index n, const Number* x, bool new_x, Index m, Number* g)
+{
+ #ifdef DEBUG
+ sciprint("Code is in eval_g\n");
+ #endif
+ // return the value of the constraints: g(x)
+ if(m==0)
+ {
+ g=NULL;
+ }
+ else
+ {
+ char name[20]="_addnlc";
+ Number *con;
+ if (getFunctionFromScilab(n,name,x, 7, 1,2,&con))
+ {
+ return false;
+ }
+
+ Index i;
+ for(i=0;i<m;i++)
+ {
+ g[i]=con[i];
+ }
+ }
+
+ return true;
+}
+
+// return the structure or values of the jacobian
+bool minconTMINLP::eval_jac_g(Index n, const Number* x, bool new_x,Index m, Index nele_jac, Index* iRow, Index *jCol,Number* values)
+{
+ #ifdef DEBUG
+ sciprint("Code is in eval_jac_g\n");
+ #endif
+ if (values == NULL)
+ {
+ if(m==0)// return the structure of the jacobian of the constraints
+ {
+ iRow=NULL;
+ jCol=NULL;
+ }
+ else
+ {
+ unsigned int i,j,idx=0;
+ for(int i=0;i<m;i++)
+ for(j=0;j<n;j++)
+ {
+ iRow[idx]=i;
+ jCol[idx]=j;
+ idx++;
+ }
+ }
+ }
+ else
+ {
+ if(m==0)
+ {
+ values=NULL;
+ }
+ else
+ {
+ double* resj;
+ char name[20]="_gradnlc";
+ if (getFunctionFromScilab(n,name,x, 7, 1,2,&resj))
+ {
+ return false;
+ }
+ int c = 0;
+ for(int i=0;i<m;i++)
+ {
+ for(int j=0;j<n;j++)
+ {
+ values[c] = resj[j*(int)m+i];
+ c++;
+ }
+ }
+ }
+ }
+ return true;
+}
+
+//get value of objective function at vector x
+bool minconTMINLP::eval_f(Index n, const Number* x, bool new_x, Number& obj_value)
+{
+ #ifdef DEBUG
+ sciprint("Code is eval_f\n");
+ #endif
+ char name[20]="_f";
+ Number *obj;
+ if (getFunctionFromScilab(n,name,x, 7, 1,2,&obj))
+ {
+ return false;
+ }
+ obj_value = *obj;
+ return true;
+}
+
+//get value of gradient of objective function at vector x.
+bool minconTMINLP::eval_grad_f(Index n, const Number* x, bool new_x, Number* grad_f)
+{
+ #ifdef DEBUG
+ sciprint("Code is in eval_grad_f\n");
+ #endif
+ char name[20]="_gradf";
+ Number *resg;
+ if (getFunctionFromScilab(n,name,x, 7, 1,2,&resg))
+ {
+ return false;
+ }
+
+ Index i;
+ for(i=0;i<numVars_;i++)
+ {
+ grad_f[i]=resg[i];
+ }
+
+ return true;
+}
+
+// This method sets the initial values for the required vectors. Here x is initialized with the user-supplied starting point x0.
+bool minconTMINLP::get_starting_point(Index n, bool init_x, Number* x,bool init_z, Number* z_L, Number* z_U,Index m, bool init_lambda,Number* lambda)
+{
+ assert(init_x == true);
+ assert(init_z == false);
+ assert(init_lambda == false);
+ if (init_x == true)
+ { //we need to set initial values for vector x
+ for (Index var=0;var<n;var++)
+	{x[var]=x0_[var];} //initialize with the user-supplied starting point
+ }
+ return true;
+}
+
+/*
+ * Return either the sparsity structure of the Hessian of the Lagrangian,
+ * or the values of the Hessian of the Lagrangian for the given values for
+ * x,lambda,obj_factor.
+*/
+
+bool minconTMINLP::eval_h(Index n, const Number* x, bool new_x,Number obj_factor, Index m, const Number* lambda,bool new_lambda, Index nele_hess, Index* iRow,Index* jCol, Number* values)
+{
+ #ifdef DEBUG
+ sciprint("Code is in eval_h\n");
+ #endif
+ double check;
+ if (values==NULL)
+ {
+ Index idx=0;
+ for (Index row = 0; row < numVars_; row++)
+ {
+ for (Index col = 0; col <= row; col++)
+ { iRow[idx] = row;
+ jCol[idx] = col;
+ idx++;
+ }
+ }
+ }
+ else
+ { char name[20]="_gradhess";
+ Number *resh;
+ if (getHessFromScilab(n,m,name,x, &obj_factor, lambda, 7, 3,2,&resh))
+ {
+ return false;
+ }
+ Index index=0;
+ for (Index row=0;row < numVars_ ;++row)
+ {
+ for (Index col=0; col <= row; ++col)
+ {
+ values[index++]=(resh[numVars_*row+col]);
+ }
+ }
+ }
+ return true;
+}
+
+void minconTMINLP::finalize_solution(SolverReturn status,Index n, const Number* x, Number obj_value)
+{
+ #ifdef DEBUG
+ sciprint("Code is in finalize_solution\n");
+ #endif
+ finalObjVal_ = obj_value;
+ status_ = status;
+ if(status==0 ||status== 3)
+ {
+ finalX_ = (double*)malloc(sizeof(double) * numVars_ * 1);
+ for (Index i=0; i<numVars_; i++)
+ {
+ finalX_[i] = x[i];
+ }
+ }
+
+}
+
+const double * minconTMINLP::getX()
+{
+ return finalX_;
+}
+
+double minconTMINLP::getObjVal()
+{
+ return finalObjVal_;
+}
+
+int minconTMINLP::returnStatus()
+{
+ return status_;
+}
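
eval_jac_g in sci_minconTMINLP.cpp above declares a dense m-by-n Jacobian, fills the structure row by row, and copies the callback result with values[c] = resj[j*m + i]: the Scilab callback returns the Jacobian column-major (Scilab's storage order), and the copy lays it out row-major to match the declared (iRow, jCol) structure. A standalone check of that index mapping (plain C++, no Scilab or Ipopt needed):

    // Verifies the column-major to row-major copy performed by eval_jac_g above.
    #include <cassert>
    #include <vector>

    int main()
    {
        const int m = 3, n = 2;
        std::vector<double> colMajor(m * n), rowMajor(m * n);

        for (int j = 0; j < n; ++j)
            for (int i = 0; i < m; ++i)
                colMajor[j * m + i] = 10.0 * i + j;        // value encodes (row i, column j)

        int c = 0;
        for (int i = 0; i < m; ++i)                        // same loop nest as eval_jac_g
            for (int j = 0; j < n; ++j)
                rowMajor[c++] = colMajor[j * m + i];

        for (int i = 0; i < m; ++i)
            for (int j = 0; j < n; ++j)
                assert(rowMajor[i * n + j] == 10.0 * i + j);  // entry (i, j) is at its row-major slot
        return 0;
    }
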
diff --git a/build/cpp/sci_minconTMINLP.cpp~ b/build/cpp/sci_minconTMINLP.cpp~
new file mode 100644
index 0000000..2b9cbc3
--- /dev/null
+++ b/build/cpp/sci_minconTMINLP.cpp~
@@ -0,0 +1,267 @@
+// Copyright (C) 2015 - IIT Bombay - FOSSEE
+//
+// Author: Harpreet Singh, Pranav Deshpande and Akshay Miterani
+// Organization: FOSSEE, IIT Bombay
+// Email: toolbox@scilab.in
+// This file must be used under the terms of the CeCILL.
+// This source file is licensed as described in the file COPYING, which
+// you should have received as part of this distribution. The terms
+// are also available at
+// http://www.cecill.info/licences/Licence_CeCILL_V2-en.txt
+
+#include "minconTMINLP.hpp"
+#include "sci_iofunc.hpp"
+
+extern "C"
+{
+#include "call_scilab.h"
+#include <api_scilab.h>
+#include <Scierror.h>
+#include <BOOL.h>
+#include <localization.h>
+#include <sciprint.h>
+#include <string.h>
+#include <assert.h>
+}
+
+using namespace Ipopt;
+using namespace Bonmin;
+
+minconTMINLP::~minconTMINLP()
+{
+ free(finalX_);
+}
+
+// Set the type of every variable - CONTINUOUS or INTEGER
+bool minconTMINLP::get_variables_types(Index n, VariableType* var_types)
+{
+ n = numVars_;
+ for(int i=0; i < n; i++)
+ var_types[i] = CONTINUOUS;
+ for(int i=0 ; i < intconSize_ ; ++i)
+ var_types[(int)(intcon_[i]-1)] = INTEGER;
+ return true;
+}
+
+// The linearity of the variables - LINEAR or NON_LINEAR
+bool minconTMINLP::get_variables_linearity(Index n, Ipopt::TNLP::LinearityType* var_types)
+{ return true; }
+
+// The linearity of the constraints - LINEAR or NON_LINEAR
+bool minconTMINLP::get_constraints_linearity(Index m, Ipopt::TNLP::LinearityType* const_types)
+{ return true;}
+
+//get NLP info such as number of variables,constraints,no.of elements in jacobian and hessian to allocate memory
+bool minconTMINLP::get_nlp_info(Index& n, Index& m, Index& nnz_jac_g, Index& nnz_h_lag, TNLP::IndexStyleEnum& index_style)
+{
+ n=numVars_; // Number of variables
+ m=numCons_; // Number of constraints
+ nnz_jac_g = 0; // No. of elements in Jacobian of constraints
+	nnz_h_lag = n*(n+1)/2; // No. of elements in the lower triangle of the Hessian of the Lagrangian.
+ index_style=TNLP::C_STYLE; // Index style of matrices
+ return true;
+}
+
+//get variable and constraint bound info
+bool minconTMINLP::get_bounds_info(Index n, Number* x_l, Number* x_u, Index m, Number* g_l, Number* g_u)
+{
+ unsigned int i;
+ for(i=0;i<n;i++)
+ {
+ x_l[i]=lb_[i];
+ x_u[i]=ub_[i];
+ }
+
+ for(i=0;i<m;i++)
+ g_l=conLb_[i];
+ g_u=conUb_[i];
+ return true;
+}
+
+// return the value of the constraints: g(x)
+bool minconTMINLP::eval_g(Index n, const Number* x, bool new_x, Index m, Number* g)
+{
+ // return the value of the constraints: g(x)
+ if(m==0)
+ {
+ g=NULL;
+ }
+ else
+ {
+ char name[20]="_addnlc";
+ Number *con;
+ if (getFunctionFromScilab(n,name,x, 7, 1,2,&con))
+ {
+ return false;
+ }
+
+ Index i;
+ for(i=0;i<m;i++)
+ {
+ g[i]=con[i];
+ }
+ }
+ return true;
+}
+
+// return the structure or values of the jacobian
+bool minconTMINLP::eval_jac_g(Index n, const Number* x, bool new_x,Index m, Index nele_jac, Index* iRow, Index *jCol,Number* values)
+{
+ if (values == NULL)
+ {
+ if(m==0)// return the structure of the jacobian of the constraints
+ {
+ iRow=NULL;
+ jCol=NULL;
+ }
+ else
+ {
+ unsigned int i,j,idx=0;
+ for(int i=0;i<m;i++)
+ for(j=0;j<n;j++)
+ {
+ iRow[idx]=i;
+ jCol[idx]=j;
+ idx++;
+ }
+ }
+ }
+ else
+ {
+ if(m==0)
+ values=NULL;
+
+ else
+ {
+ double* resj;
+ if (getFunctionFromScilab(n,name,x, 7, 1,2,&resj))
+ {
+ return false;
+ }
+ for(j=0;j<n;j++)
+ {
+ values[c] = resj[j*(int)nonlinCon_+i];
+ c++;
+ }
+ }
+ }
+
+
+ return true;
+}
+
+//get value of objective function at vector x
+bool minconTMINLP::eval_f(Index n, const Number* x, bool new_x, Number& obj_value)
+{
+ char name[20]="_f";
+ Number *obj;
+ if (getFunctionFromScilab(n,name,x, 7, 1,2,&obj))
+ {
+ return false;
+ }
+ obj_value = *obj;
+ return true;
+}
+
+//get value of gradient of objective function at vector x.
+bool minconTMINLP::eval_grad_f(Index n, const Number* x, bool new_x, Number* grad_f)
+{
+ char name[20]="_gradf";
+ Number *resg;
+ if (getFunctionFromScilab(n,name,x, 7, 1,2,&resg))
+ {
+ return false;
+ }
+
+ Index i;
+ for(i=0;i<numVars_;i++)
+ {
+ grad_f[i]=resg[i];
+ }
+ return true;
+}
+
+// This method sets initial values for required vectors . For now we are assuming 0 to all values.
+bool minconTMINLP::get_starting_point(Index n, bool init_x, Number* x,bool init_z, Number* z_L, Number* z_U,Index m, bool init_lambda,Number* lambda)
+{
+ assert(init_x == true);
+ assert(init_z == false);
+ assert(init_lambda == false);
+ if (init_x == true)
+ { //we need to set initial values for vector x
+ for (Index var=0;var<n;var++)
+ {x[var]=x0_[i];}//initialize with 0.
+ }
+ return true;
+}
+
+/*
+ * Return either the sparsity structure of the Hessian of the Lagrangian,
+ * or the values of the Hessian of the Lagrangian for the given values for
+ * x,lambda,obj_factor.
+*/
+
+bool minconTMINLP::eval_h(Index n, const Number* x, bool new_x,Number obj_factor, Index m, const Number* lambda,bool new_lambda, Index nele_hess, Index* iRow,Index* jCol, Number* values)
+{
+ double check;
+ if (values==NULL)
+ {
+ Index idx=0;
+ for (Index row = 0; row < numVars_; row++)
+ {
+ for (Index col = 0; col <= row; col++)
+ { iRow[idx] = row;
+ jCol[idx] = col;
+ idx++;
+ }
+ }
+ }
+
+ else
+ { char name[20]="_gradhess";
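+		// "_gradhess" is assumed to return the dense Hessian of the objective at x; only the lower triangle is copied, scaled by obj_factor (constraint/lambda terms are not added here).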
+ Number *resh;
+ if (getFunctionFromScilab(n,name,x, 7, 1,2,&resh))
+ {
+ return false;
+ }
+ Index index=0;
+ for (Index row=0;row < numVars_ ;++row)
+ {
+ for (Index col=0; col <= row; ++col)
+ {
+ values[index++]=obj_factor*(resh[numVars_*row+col]);
+ }
+ }
+ }
+ return true;
+}
+
+void minconTMINLP::finalize_solution(SolverReturn status,Index n, const Number* x, Number obj_value)
+{
+ finalObjVal_ = obj_value;
+ status_ = status;
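+	// Status values 0 and 3 are taken to correspond to Bonmin's SUCCESS and LIMIT_EXCEEDED returns; the solution vector is copied out only in those cases.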
+ if(status==0 ||status== 3)
+ {
+ finalX_ = (double*)malloc(sizeof(double) * numVars_ * 1);
+ for (Index i=0; i<numVars_; i++)
+ {
+ finalX_[i] = x[i];
+ }
+ }
+
+}
+
+const double * minconTMINLP::getX()
+{
+ return finalX_;
+}
+
+double minconTMINLP::getObjVal()
+{
+ return finalObjVal_;
+}
+
+int minconTMINLP::returnStatus()
+{
+ return status_;
+}
diff --git a/build/cpp/sci_minuncTMINLP.cpp b/build/cpp/sci_minuncTMINLP.cpp
new file mode 100644
index 0000000..b02ab8e
--- /dev/null
+++ b/build/cpp/sci_minuncTMINLP.cpp
@@ -0,0 +1,237 @@
+// Copyright (C) 2015 - IIT Bombay - FOSSEE
+//
+// Author: Harpreet Singh, Pranav Deshpande and Akshay Miterani
+// Organization: FOSSEE, IIT Bombay
+// Email: toolbox@scilab.in
+// This file must be used under the terms of the CeCILL.
+// This source file is licensed as described in the file COPYING, which
+// you should have received as part of this distribution. The terms
+// are also available at
+// http://www.cecill.info/licences/Licence_CeCILL_V2-en.txt
+
+#include "minuncTMINLP.hpp"
+#include "sci_iofunc.hpp"
+
+extern "C"
+{
+#include "call_scilab.h"
+#include <api_scilab.h>
+#include <Scierror.h>
+#include <BOOL.h>
+#include <localization.h>
+#include <sciprint.h>
+#include <string.h>
+#include <assert.h>
+}
+
+using namespace std;
+using namespace Ipopt;
+using namespace Bonmin;
+
+minuncTMINLP::~minuncTMINLP()
+{
+ free(finalX_);
+}
+
+// Set the type of every variable - CONTINUOUS or INTEGER
+bool minuncTMINLP::get_variables_types(Index n, VariableType* var_types)
+{
+ n = numVars_;
+ for(int i=0; i < n; i++)
+ var_types[i] = CONTINUOUS;
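+	// intcon_ holds 1-based variable indices coming from Scilab, hence the -1 when marking integer variables.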
+ for(int i=0 ; i < intconSize_ ; ++i)
+ var_types[(int)(intcon_[i]-1)] = INTEGER;
+ return true;
+}
+
+// The linearity of the variables - LINEAR or NON_LINEAR
+bool minuncTMINLP::get_variables_linearity(Index n, Ipopt::TNLP::LinearityType* var_types)
+{
+ /*
+ n = numVars_;
+ for(int i = 0; i < n; i++)
+ var_types[i] = Ipopt::TNLP::LINEAR;
+ */
+ return true;
+}
+
+// The linearity of the constraints - LINEAR or NON_LINEAR
+bool minuncTMINLP::get_constraints_linearity(Index m, Ipopt::TNLP::LinearityType* const_types)
+{
+ /* m = numConstr_;
+ for(int i = 0; i < m; i++)
+ const_types[i] = Ipopt::TNLP::LINEAR;
+ */
+ return true;
+}
+
+// Get NLP info: the number of variables and constraints, and the number of non-zero elements in the Jacobian and Hessian, so that memory can be allocated.
+bool minuncTMINLP::get_nlp_info(Index& n, Index& m, Index& nnz_jac_g, Index& nnz_h_lag, TNLP::IndexStyleEnum& index_style)
+{
+ n=numVars_; // Number of variables
+ m=0; // Number of constraints
+ nnz_jac_g = 0; // No. of elements in Jacobian of constraints
+	nnz_h_lag = n*(n+1)/2; // No. of elements in the lower triangle of the Hessian of the Lagrangian (dense)
+ index_style=TNLP::C_STYLE; // Index style of matrices
+ return true;
+}
+
+//get variable and constraint bound info
+bool minuncTMINLP::get_bounds_info(Index n, Number* x_l, Number* x_u, Index m, Number* g_l, Number* g_u)
+{
+ unsigned int i;
+ for(i=0;i<n;i++)
+ {
+ x_l[i]=-1.0e19;
+ x_u[i]=1.0e19;
+ }
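+	// Bounds of +/-1.0e19 are treated by Ipopt as "no bound", so every variable is free; with m = 0 there are no constraint bounds to fill.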
+
+ g_l=NULL;
+ g_u=NULL;
+ return true;
+}
+
+// return the value of the constraints: g(x)
+bool minuncTMINLP::eval_g(Index n, const Number* x, bool new_x, Index m, Number* g)
+{
+ // return the value of the constraints: g(x)
+ g=NULL;
+ return true;
+}
+
+// return the structure or values of the jacobian
+bool minuncTMINLP::eval_jac_g(Index n, const Number* x, bool new_x,Index m, Index nele_jac, Index* iRow, Index *jCol,Number* values)
+{
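+	// The problem is unconstrained (m = 0, nnz_jac_g = 0), so there is no Jacobian structure or values to report.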
+ if (values == NULL)
+ {
+ // return the structure of the jacobian of the constraints
+ iRow=NULL;
+ jCol=NULL;
+ }
+ else
+ {
+ values=NULL;
+ }
+
+ return true;
+}
+
+//get value of objective function at vector x
+bool minuncTMINLP::eval_f(Index n, const Number* x, bool new_x, Number& obj_value)
+{
+ char name[20]="_f";
+ Number *obj;
+ if (getFunctionFromScilab(n,name,x, 7, 1,2,&obj))
+ {
+ return false;
+ }
+ obj_value = *obj;
+ return true;
+}
+
+//get value of gradient of objective function at vector x.
+bool minuncTMINLP::eval_grad_f(Index n, const Number* x, bool new_x, Number* grad_f)
+{
+ char name[20]="_gradf";
+ Number *resg;
+ if (getFunctionFromScilab(n,name,x, 7, 1, 2, &resg))
+ {
+ return false;
+ }
+
+ Index i;
+ for(i=0;i<numVars_;i++)
+ {
+ grad_f[i]=resg[i];
+ }
+ return true;
+}
+
+// Set the starting point for the variable vector x; the user-supplied guess varGuess_ is copied in.
+bool minuncTMINLP::get_starting_point(Index n, bool init_x, Number* x,bool init_z, Number* z_L, Number* z_U,Index m, bool init_lambda,Number* lambda)
+{
+ assert(init_x == true);
+ assert(init_z == false);
+ assert(init_lambda == false);
+ if (init_x == true)
+ { //we need to set initial values for vector x
+ for (Index var=0;var<n;var++)
+			x[var]=varGuess_[var];	// copy the user-supplied starting guess
+ }
+
+ return true;
+}
+
+/*
+ * Return either the sparsity structure of the Hessian of the Lagrangian,
+ * or the values of the Hessian of the Lagrangian for the given values for
+ * x,lambda,obj_factor.
+*/
+
+bool minuncTMINLP::eval_h(Index n, const Number* x, bool new_x,Number obj_factor, Index m, const Number* lambda,bool new_lambda, Index nele_hess, Index* iRow,Index* jCol, Number* values)
+{
+ double check;
+ if (values==NULL)
+ {
+ Index idx=0;
+ for (Index row = 0; row < numVars_; row++)
+ {
+ for (Index col = 0; col <= row; col++)
+ {
+ iRow[idx] = row;
+ jCol[idx] = col;
+ idx++;
+ }
+ }
+ }
+
+ else
+ {
+ char name[20]="_gradhess";
+ Number *resh;
+ if (getFunctionFromScilab(n,name,x, 7, 1,2,&resh))
+ {
+ return false;
+ }
+ Index index=0;
+ for (Index row=0;row < numVars_ ;++row)
+ {
+ for (Index col=0; col <= row; ++col)
+ {
+ values[index++]=obj_factor*(resh[numVars_*row+col]);
+ }
+ }
+	}
+	return true;
+}
+
+
+void minuncTMINLP::finalize_solution(SolverReturn status,Index n, const Number* x, Number obj_value)
+{
+ finalObjVal_ = obj_value;
+ status_ = status;
+ if(status==0 ||status== 3)
+ {
+ finalX_ = (double*)malloc(sizeof(double) * numVars_ * 1);
+ for (Index i=0; i<numVars_; i++)
+ {
+ finalX_[i] = x[i];
+ }
+ }
+
+}
+
+const double * minuncTMINLP::getX()
+{
+ return finalX_;
+}
+
+double minuncTMINLP::getObjVal()
+{
+ return finalObjVal_;
+}
+
+int minuncTMINLP::returnStatus()
+{
+ return status_;
+}