path: root/sci_gateway/cpp
author     Harpreet    2016-01-25 01:05:02 +0530
committer  Harpreet    2016-01-25 01:05:02 +0530
commit     a2d9c2bfd6eb83d1a494821176388eb312d08254 (patch)
tree       611fba3b340ba48b9d9d7435ce2f29b1ce0c12fa /sci_gateway/cpp
parent     dd3d72ae2cdb43311b4e501966f09694bbd3e505 (diff)
functions added
Diffstat (limited to 'sci_gateway/cpp')
-rwxr-xr-x  sci_gateway/cpp/LinCLP.hpp                82
-rw-r--r--  sci_gateway/cpp/builder_gateway_cpp.sce   36
-rw-r--r--  sci_gateway/cpp/libFAMOS.c                10
-rwxr-xr-x  sci_gateway/cpp/libFAMOS.so               bin 122920 -> 199006 bytes
-rw-r--r--  sci_gateway/cpp/loader.sce                 5
-rw-r--r--  sci_gateway/cpp/minbndNLP.hpp             116
-rw-r--r--  sci_gateway/cpp/minconNLP.hpp             173
-rw-r--r--  sci_gateway/cpp/minuncNLP.hpp             113
-rw-r--r--  sci_gateway/cpp/read_mps.cpp              113
-rw-r--r--  sci_gateway/cpp/sci_LinCLP.cpp            119
-rw-r--r--  sci_gateway/cpp/sci_LinProg.cpp           150
-rw-r--r--  sci_gateway/cpp/sci_iofunc.cpp             67
-rw-r--r--  sci_gateway/cpp/sci_iofunc.hpp             16
-rw-r--r--  sci_gateway/cpp/sci_ipoptfminbnd.cpp      195
-rw-r--r--  sci_gateway/cpp/sci_ipoptfmincon.cpp      273
-rw-r--r--  sci_gateway/cpp/sci_ipoptfminunc.cpp      201
-rw-r--r--  sci_gateway/cpp/sci_minbndNLP.cpp         353
-rw-r--r--  sci_gateway/cpp/sci_minconNLP.cpp         797
-rw-r--r--  sci_gateway/cpp/sci_minuncNLP.cpp         377
19 files changed, 3167 insertions, 29 deletions
diff --git a/sci_gateway/cpp/LinCLP.hpp b/sci_gateway/cpp/LinCLP.hpp
new file mode 100755
index 0000000..90964f4
--- /dev/null
+++ b/sci_gateway/cpp/LinCLP.hpp
@@ -0,0 +1,82 @@
+/*
+ * Linear Solver Toolbox for Scilab using the CLP library
+ * Authors:
+ *	Guru Pradeep Reddy
+ *	Bhanu Priya Sayal
+ *
+ * Minimizes a linear objective function in any number of variables,
+ * subject to linear equality/inequality constraints.
+ */
+
+#ifndef __LinCLP_HPP__
+#define __LinCLP_HPP__
+
+#include"OsiSolverInterface.hpp"
+#include "OsiClpSolverInterface.hpp"
+#include "CoinPackedMatrix.hpp"
+#include "CoinPackedVector.hpp"
+
+class LinCLP
+{
+ private:
+
+ int numVars_; //Number of variables
+
+ int numCons_; //Number of inequality constraints
+
+	double* objMatrix_;		//Objective function coefficients
+
+	double* conMatrix_;		//Inequality constraint matrix
+
+	double* conlb_;			//Constraint lower-bound vector
+
+	double* conub_;			//Constraint upper-bound vector
+
+	double* lb_;			//Lower bounds for all variables
+
+	double* ub_;			//Upper bounds for all variables
+
+	double* options_;		//Options for setting maximum iterations and writing mps and lp files
+
+	const double* xValue_ = NULL;	//Optimal values of the variables
+
+	double objValue_ = 0;		//Optimal value of the objective
+
+	int status_ = 0;		//Return status
+
+	double iterations_ = 0;		//Number of iterations
+
+	const double* reducedCost_ = NULL;	//Reduced costs
+
+	const double* dual_ = NULL;	//Dual values (row prices) of the solution
+
+
+ public:
+/*
+ * Constructor
+*/
+ LinCLP(int numVars_ , int numCons_ ,double objMatrix_[] , double conMatrix_[] , double conlb_[] , double conub_[] ,double lb_[] , double ub_[], double options_[]);
+
+
+ virtual ~LinCLP(); //Destructor to free memory
+
+ const double* getX(); //Returns a pointer to matrix of size
+ //1*numVars with final values for the objective variables
+
+ double getObjVal(); //Returns the output of the final value of the objective
+
+ int returnStatus(); //Returns the status of the problem
+
+ double iterCount(); //Returns the iteration count
+
+	const double* getReducedCost();	//Returns a pointer to a matrix of size
+					//1*numVars with the reduced costs of the variables
+
+	const double* getDual();	//Returns a pointer to a matrix of size
+					//1*numCons with the dual values (row prices)
+
+};
+
+#endif /* __LinCLP_HPP__ */
+
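For orientation, here is a minimal, hypothetical sketch of how this class is meant to be driven. It is not part of the commit: it assumes linkage against sci_LinCLP.cpp and the CLP/OSI libraries, and the two-variable LP data is invented purely for illustration.

    // illustrative only: minimize x1 + 2*x2  s.t.  x1 + x2 <= 10,  0 <= x1,x2 <= 5
    #include "LinCLP.hpp"
    #include <cstdio>

    int main()
    {
        double obj[]     = {1.0, 2.0};
        double conMat[]  = {1.0, 1.0};     // 1 x 2 constraint matrix (column-major, as passed from Scilab)
        double conlb[]   = {-1.0e30};      // constraint lower bound (effectively -infinity)
        double conub[]   = {10.0};         // constraint upper bound
        double lb[]      = {0.0, 0.0};     // variable lower bounds
        double ub[]      = {5.0, 5.0};     // variable upper bounds
        double options[] = {3000.0};       // maximum number of iterations

        LinCLP lp(2, 1, obj, conMat, conlb, conub, lb, ub, options);

        const double* x = lp.getX();
        std::printf("status=%d  obj=%g  x=(%g, %g)  iters=%g\n",
                    lp.returnStatus(), lp.getObjVal(), x[0], x[1], lp.iterCount());
        return 0;
    }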
diff --git a/sci_gateway/cpp/builder_gateway_cpp.sce b/sci_gateway/cpp/builder_gateway_cpp.sce
index 6e4adf7..3503996 100644
--- a/sci_gateway/cpp/builder_gateway_cpp.sce
+++ b/sci_gateway/cpp/builder_gateway_cpp.sce
@@ -1,13 +1,13 @@
// Copyright (C) 2015 - IIT Bombay - FOSSEE
//
+// Author: Keyur Joshi, Sai Kiran, Iswarya and Harpreet Singh
+// Organization: FOSSEE, IIT Bombay
+// Email: harpreet.mertia@gmail.com
// This file must be used under the terms of the CeCILL.
// This source file is licensed as described in the file COPYING, which
// you should have received as part of this distribution. The terms
// are also available at
// http://www.cecill.info/licences/Licence_CeCILL_V2-en.txt
-// Author: Harpreet Singh
-// Organization: FOSSEE, IIT Bombay
-// Email: toolbox@scilab.in
mode(-1)
lines(0)
@@ -21,9 +21,9 @@ path_builder = get_absolute_file_path('builder_gateway_cpp.sce');
tools_path = path_builder + "../../thirdparty/linux/";
-C_Flags=["-w -fpermissive -I"+tools_path+"include/coin -Wl,-rpath="+tools_path+"lib/"+Version+filesep()+" "]
+C_Flags=["-D__USE_DEPRECATED_STACK_FUNCTIONS__ -w -fpermissive -I"+tools_path+"include/coin -Wl,-rpath="+tools_path+"lib/"+Version+filesep()+" "]
-Linker_Flag = ["-L"+tools_path+"lib/"+Version+filesep()+"libSym"+" "+"-L"+tools_path+"lib/"+Version+filesep()+"libipopt" ]
+Linker_Flag = ["-L"+tools_path+"lib/"+Version+filesep()+"libSym"+" "+"-L"+tools_path+"lib/"+Version+filesep()+"libipopt"+" "+"-L"+tools_path+"lib/"+Version+filesep()+"libClp"+" "+"-L"+tools_path+"lib/"+Version+filesep()+"libOsiClp"+" "+"-L"+tools_path+"lib/"+Version+filesep()+"libCoinUtils" ]
//Name of All the Functions
@@ -108,8 +108,17 @@ Function_Names = [
"sym_getIterCount","sci_sym_get_iteration_count";
"sym_getConstrActivity","sci_sym_getRowActivity";
+ //Linprog function
+ "linearprog","sci_linearprog"
+ "rmps","sci_rmps"
+
//QP function
"solveqp","sci_solveqp"
+
+ //fminunc function and fminbnd function
+ "solveminuncp","sci_solveminuncp"
+ "solveminbndp","sci_solveminbndp"
+ "solveminconp","sci_solveminconp"
];
//Name of all the files to be compiled
@@ -140,8 +149,21 @@ Files = [
"sci_sym_remove.cpp",
"sci_QuadNLP.cpp",
"QuadNLP.hpp",
- "sci_ipopt.cpp"
-
+ "sci_ipopt.cpp",
+ "minuncNLP.hpp",
+ "sci_minuncNLP.cpp",
+ "sci_ipoptfminunc.cpp",
+ "minbndNLP.hpp",
+ "sci_minbndNLP.cpp",
+ "sci_ipoptfminbnd.cpp",
+ "minconNLP.hpp",
+ "sci_minconNLP.cpp",
+ "sci_ipoptfmincon.cpp",
+ "sci_ipopt.cpp",
+ "sci_LinProg.cpp",
+ "sci_LinCLP.cpp",
+ "LinCLP.hpp",
+ "read_mps.cpp"
]
tbx_build_gateway(toolbox_title,Function_Names,Files,get_absolute_file_path("builder_gateway_cpp.sce"), [], Linker_Flag, C_Flags, [], "g++");
diff --git a/sci_gateway/cpp/libFAMOS.c b/sci_gateway/cpp/libFAMOS.c
index 61990ad..d7911de 100644
--- a/sci_gateway/cpp/libFAMOS.c
+++ b/sci_gateway/cpp/libFAMOS.c
@@ -64,7 +64,12 @@ extern Gatefunc sci_sym_getVarSoln;
extern Gatefunc sci_sym_getObjVal;
extern Gatefunc sci_sym_get_iteration_count;
extern Gatefunc sci_sym_getRowActivity;
+extern Gatefunc sci_linearprog;
+extern Gatefunc sci_rmps;
extern Gatefunc sci_solveqp;
+extern Gatefunc sci_solveminuncp;
+extern Gatefunc sci_solveminbndp;
+extern Gatefunc sci_solveminconp;
static GenericTable Tab[]={
{(Myinterfun)sci_gateway,sci_sym_open,"sym_open"},
{(Myinterfun)sci_gateway,sci_sym_close,"sym_close"},
@@ -124,7 +129,12 @@ static GenericTable Tab[]={
{(Myinterfun)sci_gateway,sci_sym_getObjVal,"sym_getObjVal"},
{(Myinterfun)sci_gateway,sci_sym_get_iteration_count,"sym_getIterCount"},
{(Myinterfun)sci_gateway,sci_sym_getRowActivity,"sym_getConstrActivity"},
+ {(Myinterfun)sci_gateway,sci_linearprog,"linearprog"},
+ {(Myinterfun)sci_gateway,sci_rmps,"rmps"},
{(Myinterfun)sci_gateway,sci_solveqp,"solveqp"},
+ {(Myinterfun)sci_gateway,sci_solveminuncp,"solveminuncp"},
+ {(Myinterfun)sci_gateway,sci_solveminbndp,"solveminbndp"},
+ {(Myinterfun)sci_gateway,sci_solveminconp,"solveminconp"},
};
int C2F(libFAMOS)()
diff --git a/sci_gateway/cpp/libFAMOS.so b/sci_gateway/cpp/libFAMOS.so
index 4ab40e5..c8462c6 100755
--- a/sci_gateway/cpp/libFAMOS.so
+++ b/sci_gateway/cpp/libFAMOS.so
Binary files differ
diff --git a/sci_gateway/cpp/loader.sce b/sci_gateway/cpp/loader.sce
index fe1d630..37305c7 100644
--- a/sci_gateway/cpp/loader.sce
+++ b/sci_gateway/cpp/loader.sce
@@ -68,7 +68,12 @@ list_functions = [ 'sym_open';
'sym_getObjVal';
'sym_getIterCount';
'sym_getConstrActivity';
+ 'linearprog';
+ 'rmps';
'solveqp';
+ 'solveminuncp';
+ 'solveminbndp';
+ 'solveminconp';
];
addinter(libFAMOS_path + filesep() + 'libFAMOS' + getdynlibext(), 'libFAMOS', list_functions);
// remove temp. variables on stack
diff --git a/sci_gateway/cpp/minbndNLP.hpp b/sci_gateway/cpp/minbndNLP.hpp
new file mode 100644
index 0000000..17d5a7e
--- /dev/null
+++ b/sci_gateway/cpp/minbndNLP.hpp
@@ -0,0 +1,116 @@
+// Copyright (C) 2015 - IIT Bombay - FOSSEE
+//
+// Author: R.Vidyadhar & Vignesh Kannan
+// Organization: FOSSEE, IIT Bombay
+// Email: rvidhyadar@gmail.com & vignesh2496@gmail.com
+// This file must be used under the terms of the CeCILL.
+// This source file is licensed as described in the file COPYING, which
+// you should have received as part of this distribution. The terms
+// are also available at
+// http://www.cecill.info/licences/Licence_CeCILL_V2-en.txt
+
+
+#ifndef __minbndNLP_HPP__
+#define __minbndNLP_HPP__
+#include "IpTNLP.hpp"
+
+using namespace Ipopt;
+
+class minbndNLP : public TNLP
+{
+ private:
+
+ Index numVars_; //Number of input variables
+
+ Index numConstr_; //Number of constraints
+
+	Number *finalX_= NULL;		//finalX_ is a pointer to a matrix of size of 1*numVars_
+					//with the final values of the primal variables.
+
+ Number *finalZl_= NULL; //finalZl_ is a pointer to a matrix of size of 1*numVar_
+ // with final values for the lower bound multipliers
+
+ Number *finalZu_= NULL; //finalZu_ is a pointer to a matrix of size of 1*numVar_
+ // with final values for the upper bound multipliers
+
+ Number finalObjVal_; //finalObjVal_ is a scalar with the final value of the objective.
+
+	int iter_;			//Number of iterations.
+
+ int status_; //Solver return status
+
+
+	const Number *varUB_= NULL;	//varUB_ is a pointer to a matrix of size of 1*numVars_
+					// with the upper bounds of all variables.
+
+	const Number *varLB_= NULL;	//varLB_ is a pointer to a matrix of size of 1*numVars_
+					// with the lower bounds of all variables.
+
+ minbndNLP(const minbndNLP&);
+ minbndNLP& operator=(const minbndNLP&);
+
+ public:
+
+ /** user defined constructor */
+ minbndNLP(Index nV, Index nC,Number *LB,Number *UB):numVars_(nV),numConstr_(nC),finalX_(0),finalZl_(0), finalZu_(0),varLB_(LB),varUB_(UB),finalObjVal_(1e20){ }
+
+ /** default destructor */
+ virtual ~minbndNLP();
+
+ /** Method to return some info about the nlp */
+ virtual bool get_nlp_info(Index& n, Index& m, Index& nnz_jac_g,
+ Index& nnz_h_lag, IndexStyleEnum& index_style);
+
+ /** Method to return the bounds for my problem */
+ virtual bool get_bounds_info(Index n, Number* x_l, Number* x_u,
+ Index m, Number* g_l, Number* g_u);
+
+ /** Method to return the starting point for the algorithm */
+ virtual bool get_starting_point(Index n, bool init_x, Number* x,
+ bool init_z, Number* z_L, Number* z_U,
+ Index m, bool init_lambda,
+ Number* lambda);
+
+ /** Method to return the objective value */
+ virtual bool eval_f(Index n, const Number* x, bool new_x, Number& obj_value);
+
+ /** Method to return the gradient of the objective */
+ virtual bool eval_grad_f(Index n, const Number* x, bool new_x, Number* grad_f);
+
+ /** Method to return the constraint residuals */
+ virtual bool eval_g(Index n, const Number* x, bool new_x, Index m, Number* g);
+
+ /** Method to return:
+ * 1) The structure of the jacobian (if "values" is NULL)
+ * 2) The values of the jacobian (if "values" is not NULL)
+ */
+ virtual bool eval_jac_g(Index n, const Number* x, bool new_x,Index m, Index nele_jac, Index* iRow, Index *jCol,Number* values);
+
+ /** Method to return:
+ * 1) The structure of the hessian of the lagrangian (if "values" is NULL)
+ * 2) The values of the hessian of the lagrangian (if "values" is not NULL)
+ */
+ virtual bool eval_h(Index n, const Number* x, bool new_x,Number obj_factor, Index m, const Number* lambda,bool new_lambda, Index nele_hess, Index* iRow,Index* jCol, Number* values);
+
+ /** This method is called when the algorithm is complete so the TNLP can store/write the solution */
+ virtual void finalize_solution(SolverReturn status,Index n, const Number* x, const Number* z_L, const Number* z_U,Index m, const Number* g, const Number* lambda,Number obj_value,const IpoptData* ip_data,IpoptCalculatedQuantities* ip_cq);
+
+	const double * getX();		//Returns a pointer to a matrix of size of 1*numVars_
+					//with the final values of the primal variables.
+
+ const double * getZl(); //Returns a pointer to a matrix of size of 1*numVars_
+ // with final values for the lower bound multipliers
+
+ const double * getZu(); //Returns a pointer to a matrix of size of 1*numVars_
+ //with final values for the upper bound multipliers
+
+ double getObjVal(); //Returns the output of the final value of the objective.
+
+ double iterCount(); //Returns the iteration count
+
+	int returnStatus();		//Returns the solver return status
+
+};
+
+
+#endif
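For reference, this class appears to hand Ipopt a purely bound-constrained problem (no general constraints, so no constraint Jacobian is needed); informally,

$$\min_{x \in \mathbb{R}^{n}} f(x) \quad \text{subject to} \quad x_L \le x \le x_U,$$

with $n$ = numVars_ and the bounds taken from varLB_ and varUB_.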
diff --git a/sci_gateway/cpp/minconNLP.hpp b/sci_gateway/cpp/minconNLP.hpp
new file mode 100644
index 0000000..df496ce
--- /dev/null
+++ b/sci_gateway/cpp/minconNLP.hpp
@@ -0,0 +1,173 @@
+// Copyright (C) 2015 - IIT Bombay - FOSSEE
+//
+// Author: R.Vidyadhar & Vignesh Kannan
+// Organization: FOSSEE, IIT Bombay
+// Email: rvidhyadar@gmail.com & vignesh2496@gmail.com
+// This file must be used under the terms of the CeCILL.
+// This source file is licensed as described in the file COPYING, which
+// you should have received as part of this distribution. The terms
+// are also available at
+// http://www.cecill.info/licences/Licence_CeCILL_V2-en.txt
+
+
+#ifndef __minconNLP_HPP__
+#define __minconNLP_HPP__
+#include "IpTNLP.hpp"
+
+using namespace Ipopt;
+
+class minconNLP : public TNLP
+{
+ private:
+
+ Index numVars_; //Number of input variables
+
+ Index numConstr_; //Number of constraints
+
+ Number flag1_; //Gradient of objective ON or OFF
+
+ Number flag2_; //Hessian of objective ON or OFF
+
+ Number flag3_; //Jacobian of constraints ON or OFF
+
+ Number nonlinCon_; //Number of non-linear constraints
+
+ Number nonlinIneqCon_; //Number of non-linear inequality constraints
+
+ const Number *A_= NULL; //Matrix for linear inequality constraints
+
+ const Number *b_= NULL; //Matrix for bounds of linear inequality constraints
+
+ const Number *Aeq_= NULL; //Matrix for linear equality constraints
+
+ const Number *beq_= NULL; //Matrix for bounds of linear equality constraints
+
+ Index Arows_; //Number of rows of linear inequality constraints
+
+ Index Acols_; //Number of columns of linear inequality constraints
+
+ Index brows_; //Number of rows of bounds of linear inequality constraints
+
+ Index bcols_; //Number of columns of bounds of linear inequality constraints
+
+ Index Aeqrows_; //Number of rows of linear equality constraints
+
+ Index Aeqcols_; //Number of columns of linear equality constraints
+
+ Index beqrows_; //Number of rows of bounds of linear equality constraints
+
+ Index beqcols_; //Number of columns of bounds of linear equality constraints
+
+
+ const Number *varGuess_= NULL; //varGuess_ is a pointer to a matrix of size of 1*numVars_
+ //with initial guess of all variables.
+
+ const Number *varUB_= NULL; //varUB_ is a pointer to a matrix of size of 1*numVar_
+ // with upper bounds of all variables.
+
+ const Number *varLB_= NULL; //varLB_ is a pointer to a matrix of size of 1*numVar_
+ // with lower bounds of all variables.
+
+ Number *finalZl_= NULL; //finalZl_ is a pointer to a matrix of size of 1*numVar_
+ // with final values for the lower bound multipliers
+
+ Number *finalZu_= NULL; //finalZu_ is a pointer to a matrix of size of 1*numVar_
+ // with final values for the upper bound multipliers
+
+	Number *finalLambda_= NULL;	//finalLambda_ is a pointer to a matrix of size of 1*numConstr_
+					// with final values of the constraint (Lagrange) multipliers
+
+ Number *finalX_= NULL; //finalX_ is a pointer to a matrix of size of 1*numVars_
+ //with final value for the primal variables.
+
+	Number *finalGradient_=NULL;	//finalGradient_ is a pointer to a matrix of size of 1*numVars_
+					//with the final gradient of the objective at the primal variables.
+
+
+	Number *finalHessian_=NULL;	//finalHessian_ is a pointer to a matrix of size of numVars_*numVars_
+					//with the final Hessian of the objective at the primal variables.
+
+
+ Number finalObjVal_; //finalObjVal_ is a scalar with the final value of the objective.
+
+	int iter_;			//Number of iterations.
+
+ int status_; //Solver return status
+
+
+ minconNLP(const minconNLP&);
+ minconNLP& operator=(const minconNLP&);
+
+ public:
+
+ /** user defined constructor */
+ minconNLP(Index nV, Index nC, Number *x0 ,Number *A, Number *b, Number* Aeq, Number *beq, Index Arows, Index Acols, Index brows, Index bcols, Index Aeqrows, Index Aeqcols, Index beqrows, Index beqcols, Number* LB, Number* UB, Number nlC, Number nlIC, Number f1, Number f2, Number f3) : numVars_(nV), numConstr_(nC), varGuess_(x0), A_(A), b_(b), Aeq_(Aeq), beq_(beq), Arows_(Arows), Acols_(Acols), brows_(brows), bcols_(bcols), Aeqrows_(Aeqrows), Aeqcols_(Aeqcols), beqrows_(beqrows), beqcols_(beqcols), varLB_(LB), varUB_(UB), nonlinCon_(nlC), nonlinIneqCon_(nlIC), flag1_(f1), flag2_(f2), flag3_(f3), finalX_(0), finalZl_(0), finalZu_(0), finalGradient_(0), finalHessian_(0), finalObjVal_(1e20){ }
+
+ /** default destructor */
+ virtual ~minconNLP();
+
+ /** Method to return some info about the nlp */
+ virtual bool get_nlp_info(Index& n, Index& m, Index& nnz_jac_g,
+ Index& nnz_h_lag, IndexStyleEnum& index_style);
+
+ /** Method to return the bounds for my problem */
+ virtual bool get_bounds_info(Index n, Number* x_l, Number* x_u,
+ Index m, Number* g_l, Number* g_u);
+
+ /** Method to return the starting point for the algorithm */
+ virtual bool get_starting_point(Index n, bool init_x, Number* x,
+ bool init_z, Number* z_L, Number* z_U,
+ Index m, bool init_lambda,
+ Number* lambda);
+
+ /** Method to return the objective value */
+ virtual bool eval_f(Index n, const Number* x, bool new_x, Number& obj_value);
+
+ /** Method to return the gradient of the objective */
+ virtual bool eval_grad_f(Index n, const Number* x, bool new_x, Number* grad_f);
+
+ /** Method to return the constraint residuals */
+ virtual bool eval_g(Index n, const Number* x, bool new_x, Index m, Number* g);
+
+ /** Method to return:
+ * 1) The structure of the jacobian (if "values" is NULL)
+ * 2) The values of the jacobian (if "values" is not NULL)
+ */
+ virtual bool eval_jac_g(Index n, const Number* x, bool new_x,Index m, Index nele_jac, Index* iRow, Index *jCol,Number* values);
+
+ /** Method to return:
+ * 1) The structure of the hessian of the lagrangian (if "values" is NULL)
+ * 2) The values of the hessian of the lagrangian (if "values" is not NULL)
+ */
+ virtual bool eval_h(Index n, const Number* x, bool new_x,Number obj_factor, Index m, const Number* lambda,bool new_lambda, Index nele_hess, Index* iRow,Index* jCol, Number* values);
+
+ /** This method is called when the algorithm is complete so the TNLP can store/write the solution */
+ virtual void finalize_solution(SolverReturn status,Index n, const Number* x, const Number* z_L, const Number* z_U,Index m, const Number* g, const Number* lambda,Number obj_value,const IpoptData* ip_data,IpoptCalculatedQuantities* ip_cq);
+
+ const double * getX(); //Returns a pointer to a matrix of size of 1*numVars_
+ //with final value for the primal variables.
+
+ const double * getGrad(); //Returns a pointer to a matrix of size of 1*numVars_
+ //with final value of gradient for the primal variables.
+
+ const double * getHess(); //Returns a pointer to a matrix of size of numVars_*numVars_
+ //with final value of hessian for the primal variables.
+
+ const double * getZl(); //Returns a pointer to a matrix of size of 1*numVars_
+ // with final values for the lower bound multipliers
+
+ const double * getZu(); //Returns a pointer to a matrix of size of 1*numVars_
+ //with final values for the upper bound multipliers
+
+ const double * getLambda(); //Returns a pointer to a matrix of size of 1*numConstr_
+ //with final values for the constraint multipliers
+
+ double getObjVal(); //Returns the output of the final value of the objective.
+
+ double iterCount(); //Returns the iteration count
+
+	int returnStatus();		//Returns the solver return status
+
+};
+
+#endif
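For reference, the members above appear to describe the usual fmincon-style problem passed to Ipopt (assuming the conventional sign $c(x) \le 0$ for nonlinear inequalities); informally,

$$\min_{x}\; f(x) \quad \text{s.t.}\quad c(x) \le 0,\;\; c_{eq}(x) = 0,\;\; A x \le b,\;\; A_{eq} x = b_{eq},\;\; lb \le x \le ub,$$

where $A$, $b$, $A_{eq}$, $b_{eq}$ and the variable bounds map to A_, b_, Aeq_, beq_, varLB_, varUB_, and the nonlinear constraint counts to nonlinCon_ and nonlinIneqCon_.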
diff --git a/sci_gateway/cpp/minuncNLP.hpp b/sci_gateway/cpp/minuncNLP.hpp
new file mode 100644
index 0000000..70910e5
--- /dev/null
+++ b/sci_gateway/cpp/minuncNLP.hpp
@@ -0,0 +1,113 @@
+// Copyright (C) 2015 - IIT Bombay - FOSSEE
+//
+// Author: R.Vidyadhar & Vignesh Kannan
+// Organization: FOSSEE, IIT Bombay
+// Email: rvidhyadar@gmail.com & vignesh2496@gmail.com
+// This file must be used under the terms of the CeCILL.
+// This source file is licensed as described in the file COPYING, which
+// you should have received as part of this distribution. The terms
+// are also available at
+// http://www.cecill.info/licences/Licence_CeCILL_V2-en.txt
+
+
+#ifndef __minuncNLP_HPP__
+#define __minuncNLP_HPP__
+#include "IpTNLP.hpp"
+
+using namespace Ipopt;
+
+class minuncNLP : public TNLP
+{
+ private:
+
+ Index numVars_; //Number of input variables
+
+ Index numConstr_; //Number of constraints
+
+ Number flag1_; //Used for Gradient On/OFF
+
+ Number flag2_; //Used for Hessian ON/OFF
+
+ const Number *varGuess_= NULL; //varGuess_ is a pointer to a matrix of size of 1*numVars_ with initial guess of all variables.
+
+ Number *finalX_= NULL; //finalX_ is a pointer to a matrix of size of 1*numVars_ with final value for the primal variables.
+
+	Number *finalGradient_=NULL;	//finalGradient_ is a pointer to a matrix of size of 1*numVars_ with the final gradient of the objective at the primal variables.
+
+	Number *finalHessian_=NULL;	//finalHessian_ is a pointer to a matrix of size of numVars_*numVars_ with the final Hessian of the objective at the primal variables.
+
+ Number finalObjVal_; //finalObjVal_ is a scalar with the final value of the objective.
+
+	int iter_;			//Number of iterations.
+
+ int status_; //Solver return status
+
+
+ minuncNLP(const minuncNLP&);
+ minuncNLP& operator=(const minuncNLP&);
+
+ public:
+
+ /** user defined constructor */
+ minuncNLP(Index nV, Index nC,Number *x0,Number f1, Number f2):numVars_(nV),numConstr_(nC),varGuess_(x0),flag1_(f1),flag2_(f2),finalX_(0),finalGradient_(0),finalHessian_(0),finalObjVal_(1e20){ }
+
+ /** default destructor */
+ virtual ~minuncNLP();
+
+ /** Method to return some info about the nlp */
+ virtual bool get_nlp_info(Index& n, Index& m, Index& nnz_jac_g,
+ Index& nnz_h_lag, IndexStyleEnum& index_style);
+
+ /** Method to return the bounds for my problem */
+ virtual bool get_bounds_info(Index n, Number* x_l, Number* x_u,
+ Index m, Number* g_l, Number* g_u);
+
+ /** Method to return the starting point for the algorithm */
+ virtual bool get_starting_point(Index n, bool init_x, Number* x,
+ bool init_z, Number* z_L, Number* z_U,
+ Index m, bool init_lambda,
+ Number* lambda);
+
+ /** Method to return the objective value */
+ virtual bool eval_f(Index n, const Number* x, bool new_x, Number& obj_value);
+
+ /** Method to return the gradient of the objective */
+ virtual bool eval_grad_f(Index n, const Number* x, bool new_x, Number* grad_f);
+
+ /** Method to return the constraint residuals */
+ virtual bool eval_g(Index n, const Number* x, bool new_x, Index m, Number* g);
+
+ /** Method to return:
+ * 1) The structure of the jacobian (if "values" is NULL)
+ * 2) The values of the jacobian (if "values" is not NULL)
+ */
+ virtual bool eval_jac_g(Index n, const Number* x, bool new_x,Index m, Index nele_jac, Index* iRow, Index *jCol,Number* values);
+
+ /** Method to return:
+ * 1) The structure of the hessian of the lagrangian (if "values" is NULL)
+ * 2) The values of the hessian of the lagrangian (if "values" is not NULL)
+ */
+ virtual bool eval_h(Index n, const Number* x, bool new_x,Number obj_factor, Index m, const Number* lambda,bool new_lambda, Index nele_hess, Index* iRow,Index* jCol, Number* values);
+
+ /** This method is called when the algorithm is complete so the TNLP can store/write the solution */
+ virtual void finalize_solution(SolverReturn status,Index n, const Number* x, const Number* z_L, const Number* z_U,Index m, const Number* g, const Number* lambda,Number obj_value,const IpoptData* ip_data,IpoptCalculatedQuantities* ip_cq);
+
+ const double * getX(); //Returns a pointer to a matrix of size of 1*numVars_
+ //with final value for the primal variables.
+
+ const double * getGrad(); //Returns a pointer to a matrix of size of 1*numVars_
+ //with final value of gradient for the primal variables.
+
+ const double * getHess(); //Returns a pointer to a matrix of size of numVars_*numVars_
+ //with final value of hessian for the primal variables.
+
+ double getObjVal(); //Returns the output of the final value of the objective.
+
+ double iterCount(); //Returns the iteration count
+
+	int returnStatus();		//Returns the solver return status
+
+};
+
+
+#endif
diff --git a/sci_gateway/cpp/read_mps.cpp b/sci_gateway/cpp/read_mps.cpp
new file mode 100644
index 0000000..31f71b8
--- /dev/null
+++ b/sci_gateway/cpp/read_mps.cpp
@@ -0,0 +1,113 @@
+/*
+ * Linear Solver Toolbox for Scilab using CLP library
+ * Authors :
+ Guru Pradeep Reddy
+ Bhanu Priya Sayal
+*/
+
+#include "sci_iofunc.hpp"
+#include "OsiClpSolverInterface.hpp"
+
+extern "C"{
+#include <api_scilab.h>
+#include <Scierror.h>
+#include <localization.h>
+#include <sciprint.h>
+#include <iostream>
+
+//Solver function
+int sci_rmps(char *fname)
+{
+ //creating a problem pointer using base class of OsiSolverInterface and
+ //instantiate the object using derived class of ClpSolverInterface
+ OsiSolverInterface* si = new OsiClpSolverInterface();
+
+ // Error management variable
+ SciErr sciErr;
+
+ //data declarations
+ int *piAddressVarOne = NULL; //pointer used to access argument of the function
+ char* ptr; //pointer to point to address of file name
+ double* options_; //options to set maximum iterations
+	CheckInputArgument(pvApiCtx, 2, 2);		//check that there are exactly two input arguments
+	CheckOutputArgument(pvApiCtx, 6, 6);		//check that there are exactly six output arguments
+ //Getting the input arguments from Scilab
+ //Getting the MPS file path
+ //Reading mps file
+ getStringFromScilab(1,&ptr);
+
+ std::cout<<ptr;
+
+ //get options from Scilab
+ if(getFixedSizeDoubleMatrixInList(2 , 2 , 1 , 1 , &options_))
+ {
+ return 1;
+ }
+
+ //Read the MPS file
+ si->readMps(ptr);
+
+ //setting options for maximum iterations
+ si->setIntParam(OsiMaxNumIteration,options_[0]);
+
+ //Solve the problem
+ si->initialSolve();
+
+	//Querying the problem
+	//get number of variables
+	int numVars_ = si->getNumCols();
+
+ //get number of constraint equations
+	int numCons_ = si->getNumRows();
+
+ //Output the solution to Scilab
+ //get solution for x
+ const double* xValue = si->getColSolution();
+
+ //get objective value
+ double objValue = si->getObjValue();
+
+ //get Status value
+	double status = -1;	//-1 if none of the checks below sets a status
+ if(si->isProvenOptimal())
+ status=0;
+ else if(si->isProvenPrimalInfeasible())
+ status=1;
+ else if(si->isProvenDualInfeasible())
+ status=2;
+ else if(si->isIterationLimitReached())
+ status=3;
+ else if(si->isAbandoned())
+ status=4;
+ else if(si->isPrimalObjectiveLimitReached())
+ status=5;
+ else if(si->isDualObjectiveLimitReached())
+ status=6;
+
+ //get number of iterations
+ double iterations = si->getIterationCount();
+
+ //get reduced cost
+ const double* reducedCost = si->getReducedCost();
+
+ //get dual vector
+ const double* dual = si->getRowPrice();
+
+ returnDoubleMatrixToScilab(1 , 1 , numVars_ , xValue);
+ returnDoubleMatrixToScilab(2 , 1 , 1 , &objValue);
+ returnDoubleMatrixToScilab(3 , 1 , 1 , &status);
+ returnDoubleMatrixToScilab(4 , 1 , 1 , &iterations);
+ returnDoubleMatrixToScilab(5 , 1 , numVars_ , reducedCost);
+ returnDoubleMatrixToScilab(6 , 1 , numCons_ , dual);
+
+	//the solution, reduced-cost and dual arrays are owned by the solver,
+	//so they are released by deleting the solver rather than by free()
+	delete si;
+
+	return 0;
+}
+}
+
+
+
+
diff --git a/sci_gateway/cpp/sci_LinCLP.cpp b/sci_gateway/cpp/sci_LinCLP.cpp
new file mode 100644
index 0000000..7996bfc
--- /dev/null
+++ b/sci_gateway/cpp/sci_LinCLP.cpp
@@ -0,0 +1,119 @@
+/*
+ * Linear Solver Toolbox for Scilab using CLP library
+ * Authors :
+ Guru Pradeep Reddy
+ Bhanu Priya Sayal
+*/
+
+#include "LinCLP.hpp"
+extern "C"{
+#include "api_scilab.h"
+#include "Scierror.h"
+#include "localization.h"
+#include "sciprint.h"
+#include "sci_iofunc.hpp"
+
+//creating a problem pointer using Base class of OsiSolverInterface and
+//Instantiate the object using specific derived class of ClpSolver
+OsiSolverInterface* si = new OsiClpSolverInterface();
+
+LinCLP::~LinCLP()
+	{
+	   //the input arrays are owned by Scilab and the solution arrays by the
+	   //OSI solver, so this class has nothing of its own to free here
+	}
+
+//Clp Solver function definition
+LinCLP::LinCLP(int numVars_ , int numCons_ ,double objMatrix_[] , double conMatrix_[] , double conlb_[] , double conub_[] ,double lb_[] , double ub_[], double options_[])
+{
+
+ //Defining the constraint matrix
+ CoinPackedMatrix *matrix = new CoinPackedMatrix(false , 0 , 0);
+ matrix->setDimensions(0 , numVars_);
+ for(int i=0 ; i<numCons_ ; i++)
+ {
+ CoinPackedVector row;
+ for(int j=0 ; j<numVars_ ; j++)
+ {
+ row.insert(j, conMatrix_[i+j*numCons_]);
+ }
+
+ matrix->appendRow(row);
+ }
+
+ //setting options for maximum iterations
+ si->setIntParam(OsiMaxNumIteration,options_[0]);
+
+ //Load the problem to OSI
+ si->loadProblem(*matrix , lb_ , ub_, objMatrix_ , conlb_ , conub_);
+
+ //Solve the problem
+ si->initialSolve();
+
+}
+
+ //Output the solution to Scilab
+ //get solution for x
+ const double* LinCLP::getX()
+ {
+ xValue_ = si->getColSolution();
+ return xValue_;
+ }
+
+ //get objective value
+ double LinCLP::getObjVal()
+ {
+ objValue_ = si->getObjValue();
+ return objValue_;
+ }
+
+ //get exit status
+	int LinCLP::returnStatus()
+	{
+	   if(si->isProvenOptimal())
+ status_=0;
+ else if(si->isProvenPrimalInfeasible())
+ status_=1;
+ else if(si->isProvenDualInfeasible())
+ status_=2;
+ else if(si->isIterationLimitReached())
+ status_=3;
+ else if(si->isAbandoned())
+ status_=4;
+ else if(si->isPrimalObjectiveLimitReached())
+ status_=5;
+ else if(si->isDualObjectiveLimitReached())
+ status_=6;
+ return status_;
+ }
+
+ //get number of iterations
+ double LinCLP::iterCount()
+ {
+ iterations_ = si->getIterationCount();
+ return iterations_;
+ }
+
+ //get lower vector
+ const double* LinCLP::getReducedCost()
+ {
+ reducedCost_ = si->getReducedCost();
+ return reducedCost_;
+ }
+
+ //get dual vector
+	const double* LinCLP::getDual()
+ {
+ dual_ = si->getRowPrice();
+ return dual_;
+ }
+
+}
+
diff --git a/sci_gateway/cpp/sci_LinProg.cpp b/sci_gateway/cpp/sci_LinProg.cpp
new file mode 100644
index 0000000..8a4ec25
--- /dev/null
+++ b/sci_gateway/cpp/sci_LinProg.cpp
@@ -0,0 +1,150 @@
+/*
+ * Linear Solver Toolbox for Scilab using CLP library
+ * Authors :
+ Guru Pradeep Reddy
+ Bhanu Priya Sayal
+*/
+
+#include "sci_iofunc.hpp"
+#include "LinCLP.hpp"
+
+extern "C"{
+#include <api_scilab.h>
+#include <Scierror.h>
+#include <localization.h>
+#include <sciprint.h>
+
+//Solver function
+int sci_linearprog(char *fname)
+{
+ //Objective function
+ double* obj;
+ //Constraint matrix coefficients
+ double* conMatrix;
+	//Constraints lower bound
+	double* conlb;
+	//Constraints upper bound
+	double* conub;
+ //Lower bounds for variables
+ double* lb;
+ //Upper bounds for variables
+ double* ub;
+ //options for maximum iterations and writing mps
+ double* options;
+ //Flag for Mps
+ double flagMps;
+ //mps file path
+ char * mpsFile;
+ //Error structure in Scilab
+ SciErr sciErr;
+ //Number of rows and columns in objective function
+ int nVars=0, nCons=0,temp1=0,temp2=0;
+
+ CheckInputArgument(pvApiCtx , 9 , 9); //Checking the input arguments
+ CheckOutputArgument(pvApiCtx , 6, 6); //Checking the output arguments
+
+ ////////// Manage the input argument //////////
+
+ //Number of Variables
+ if(getIntFromScilab(1,&nVars))
+ {
+ return 1;
+ }
+
+ //Number of Constraints
+ if (getIntFromScilab(2,&nCons))
+ {
+ return 1;
+ }
+
+ //Objective function from Scilab
+ temp1 = nVars;
+ temp2 = nCons;
+ if (getFixedSizeDoubleMatrixFromScilab(3,1,temp1,&obj))
+ {
+ return 1;
+ }
+
+ if (nCons!=0)
+ {
+ //conMatrix matrix from scilab
+ temp1 = nCons;
+ temp2 = nVars;
+
+ if (getFixedSizeDoubleMatrixFromScilab(4,temp1,temp2,&conMatrix))
+ {
+ return 1;
+ }
+
+ //conLB matrix from scilab
+ temp1 = nCons;
+ temp2 = 1;
+ if (getFixedSizeDoubleMatrixFromScilab(5,temp1,temp2,&conlb))
+ {
+ return 1;
+ }
+
+ //conUB matrix from scilab
+ if (getFixedSizeDoubleMatrixFromScilab(6,temp1,temp2,&conub))
+ {
+ return 1;
+ }
+
+ }
+
+ //lb matrix from scilab
+ temp1 = 1;
+ temp2 = nVars;
+ if (getFixedSizeDoubleMatrixFromScilab(7,temp1,temp2,&lb))
+ {
+ return 1;
+ }
+
+
+ //ub matrix from scilab
+ if (getFixedSizeDoubleMatrixFromScilab(8,temp1,temp2,&ub))
+ {
+ return 1;
+ }
+
+ //get options from scilab
+ if(getFixedSizeDoubleMatrixInList(9 , 2 , 1 , 1 , &options))
+ {
+ return 1;
+ }
+
+ //Call to the Clp Solver
+ LinCLP* Prob = new LinCLP(nVars,nCons,obj,conMatrix,conlb,conub,lb,ub,options);
+
+ //Output the solution to Scilab
+ //get solution for x
+	const double* xValue = Prob->getX();
+
+ //get objective value
+ double objValue = Prob->getObjVal();
+
+ //get Status value
+ double status = Prob->returnStatus();
+
+ //get number of iterations
+ double iterations = Prob->iterCount();
+
+ //get reduced cost
+	const double* reducedCost = Prob->getReducedCost();
+
+	//get dual vector
+	const double* dual = Prob->getDual();
+
+ returnDoubleMatrixToScilab(1 , 1 , nVars , xValue);
+ returnDoubleMatrixToScilab(2 , 1 , 1 , &objValue);
+ returnDoubleMatrixToScilab(3 , 1 , 1 , &status);
+ returnDoubleMatrixToScilab(4 , 1 , 1 , &iterations);
+ returnDoubleMatrixToScilab(5 , 1 , nVars , reducedCost);
+ returnDoubleMatrixToScilab(6 , 1 , nCons , dual);
+
+	return 0;
+ }
+}
+
+
+
+
diff --git a/sci_gateway/cpp/sci_iofunc.cpp b/sci_gateway/cpp/sci_iofunc.cpp
index e92c318..8dc2acf 100644
--- a/sci_gateway/cpp/sci_iofunc.cpp
+++ b/sci_gateway/cpp/sci_iofunc.cpp
@@ -1,14 +1,6 @@
-// Copyright (C) 2015 - IIT Bombay - FOSSEE
-//
-// This file must be used under the terms of the CeCILL.
-// This source file is licensed as described in the file COPYING, which
-// you should have received as part of this distribution. The terms
-// are also available at
-// http://www.cecill.info/licences/Licence_CeCILL_V2-en.txt
-// Author: Keyur Joshi, Harpreet Singh
-// Organization: FOSSEE, IIT Bombay
-// Email: toolbox@scilab.in
-
+// Symphony Toolbox for Scilab
+// (Definition of) Functions for input and output from Scilab
+// By Keyur Joshi
#include "api_scilab.h"
#include "Scierror.h"
@@ -16,6 +8,35 @@
#include "BOOL.h"
#include <localization.h>
+using namespace std;
+
+int getFunctionFromScilab(int argNum, int **dest)
+{
+ //data declarations
+ SciErr sciErr;
+	int iType;
+ const char errMsg[]="Wrong type for input argument #%d: A function is expected.\n";
+ const int errNum=999;
+ //get variable address
+	sciErr = getVarAddressFromPosition(pvApiCtx, argNum, dest);
+ if(sciErr.iErr)
+ {
+ printError(&sciErr, 0);
+ return 1;
+ }
+ //check that the variable is necessarily a function
+ sciErr = getVarType(pvApiCtx, *dest, &iType);
+ if(sciErr.iErr || iType != sci_c_function)
+ {
+ Scierror(errNum,errMsg,argNum);
+ return 1;
+ }
+ return 0;
+
+}
+
+
int getDoubleFromScilab(int argNum, double *dest)
{
//data declarations
@@ -187,6 +208,29 @@ int getFixedSizeDoubleMatrixInList(int argNum, int itemPos, int rows, int cols,
printError(&sciErr, 0);
return 1;
}
+ return 0;
+}
+
+int getStringFromScilab(int argNum,char **dest)
+{
+ int *varAddress,inputMatrixRows,inputMatrixCols;
+ SciErr sciErr;
+ sciErr = getVarAddressFromPosition(pvApiCtx, argNum, &varAddress);
+
+ //check whether there is an error or not.
+ if (sciErr.iErr)
+ {
+ printError(&sciErr, 0);
+ return 1;
+ }
+ if ( !isStringType(pvApiCtx,varAddress) )
+ {
+		Scierror(999,"Wrong type for input argument #%d: A string (file name) is expected.\n",argNum);
+ return 1;
+ }
+ //read the value in that pointer pointing to file name
+ getAllocatedSingleString(pvApiCtx, varAddress, dest);
+	return 0;
}
int return0toScilab()
@@ -255,3 +299,4 @@ int returnIntegerMatrixToScilab(int itemPos, int rows, int cols, int *dest)
return 0;
}
+
diff --git a/sci_gateway/cpp/sci_iofunc.hpp b/sci_gateway/cpp/sci_iofunc.hpp
index fc379f4..2c84c82 100644
--- a/sci_gateway/cpp/sci_iofunc.hpp
+++ b/sci_gateway/cpp/sci_iofunc.hpp
@@ -1,25 +1,19 @@
-// Copyright (C) 2015 - IIT Bombay - FOSSEE
-//
-// This file must be used under the terms of the CeCILL.
-// This source file is licensed as described in the file COPYING, which
-// you should have received as part of this distribution. The terms
-// are also available at
-// http://www.cecill.info/licences/Licence_CeCILL_V2-en.txt
-// Author: Keyur Joshi, Harpreet Singh
-// Organization: FOSSEE, IIT Bombay
-// Email: toolbox@scilab.in
+// Symphony Toolbox for Scilab
+// (Declaration of) Functions for input and output from Scilab
+// By Keyur Joshi
#ifndef SCI_IOFUNCHEADER
#define SCI_IOFUNCHEADER
//input
+int getFunctionFromScilab(int argNum, int **dest);
int getDoubleFromScilab(int argNum, double *dest);
int getUIntFromScilab(int argNum, int *dest);
int getIntFromScilab(int argNum, int *dest);
int getFixedSizeDoubleMatrixFromScilab(int argNum, int rows, int cols, double **dest);
int getDoubleMatrixFromScilab(int argNum, int *rows, int *cols, double **dest);
int getFixedSizeDoubleMatrixInList(int argNum, int itemPos, int rows, int cols, double **dest);
-
+int getStringFromScilab(int argNum,char** dest);
//output
int return0toScilab();
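As a rough sketch of how a gateway strings these helpers together (not part of the commit; the function name sci_example and its argument layout are hypothetical, and the usual #include "sci_iofunc.hpp" plus extern "C" wrapper are assumed):

    // hypothetical gateway: sums a 1 x n vector passed from Scilab
    int sci_example(char *fname)
    {
        int n = 0;
        double *v = NULL;        // points into Scilab-owned memory; do not free
        double result = 0;

        if (getIntFromScilab(1, &n))                              // input 1: integer scalar
            return 1;
        if (getFixedSizeDoubleMatrixFromScilab(2, 1, n, &v))      // input 2: 1 x n vector
            return 1;

        for (int i = 0; i < n; i++)
            result += v[i];

        return returnDoubleMatrixToScilab(1, 1, 1, &result);      // output 1: scalar (0 on success)
    }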
diff --git a/sci_gateway/cpp/sci_ipoptfminbnd.cpp b/sci_gateway/cpp/sci_ipoptfminbnd.cpp
new file mode 100644
index 0000000..aa5addf
--- /dev/null
+++ b/sci_gateway/cpp/sci_ipoptfminbnd.cpp
@@ -0,0 +1,195 @@
+// Copyright (C) 2015 - IIT Bombay - FOSSEE
+//
+// Author: R.Vidyadhar & Vignesh Kannan
+// Organization: FOSSEE, IIT Bombay
+// Email: rvidhyadar@gmail.com & vignesh2496@gmail.com
+// This file must be used under the terms of the CeCILL.
+// This source file is licensed as described in the file COPYING, which
+// you should have received as part of this distribution. The terms
+// are also available at
+// http://www.cecill.info/licences/Licence_CeCILL_V2-en.txt
+
+
+#include "sci_iofunc.hpp"
+#include "IpIpoptApplication.hpp"
+#include "minbndNLP.hpp"
+#include <IpSolveStatistics.hpp>
+
+extern "C"
+{
+#include <api_scilab.h>
+#include <Scierror.h>
+#include <BOOL.h>
+#include <localization.h>
+#include <sciprint.h>
+#include <iostream>
+
+using namespace std;
+
+int sci_solveminbndp(char *fname)
+{
+ using namespace Ipopt;
+
+ CheckInputArgument(pvApiCtx, 5, 5);
+ CheckOutputArgument(pvApiCtx, 9, 9);
+
+ // Error management variable
+ SciErr sciErr;
+
+ //Function pointers,lower bound and upper bound pointers
+ int* funptr=NULL;
+ int* gradhesptr=NULL;
+ double* varLB=NULL;
+ double* varUB=NULL;
+
+ // Input arguments
+ double *cpu_time=NULL,*max_iter=NULL,*tol_val=NULL;
+ static unsigned int nVars = 0,nCons = 0;
+ unsigned int temp1 = 0,temp2 = 0, iret = 0;
+ int x1_rows, x1_cols, x2_rows, x2_cols;
+
+ // Output arguments
+ double *fX = NULL, ObjVal=0,iteration=0,cpuTime=0,fobj_eval=0;
+ double *fZl=NULL;
+ double *fZu=NULL;
+ double dual_inf, constr_viol, complementarity, kkt_error;
+ int rstatus = 0;
+ int int_fobj_eval, int_constr_eval, int_fobj_grad_eval, int_constr_jac_eval, int_hess_eval;
+
+ ////////// Manage the input argument //////////
+
+ //Objective Function
+ if(getFunctionFromScilab(1,&funptr))
+ {
+ return 1;
+ }
+
+ //Function for gradient and hessian
+ if(getFunctionFromScilab(2,&gradhesptr))
+ {
+ return 1;
+ }
+
+ //x1(lower bound) matrix from scilab
+ if(getDoubleMatrixFromScilab(3, &x1_rows, &x1_cols, &varLB))
+ {
+ return 1;
+ }
+
+ //x2(upper bound) matrix from scilab
+ if(getDoubleMatrixFromScilab(4, &x2_rows, &x2_cols, &varUB))
+ {
+ return 1;
+ }
+
+ //Getting number of iterations
+ if(getFixedSizeDoubleMatrixInList(5,2,temp1,temp2,&max_iter))
+ {
+ return 1;
+ }
+
+ //Getting Cpu Time
+ if(getFixedSizeDoubleMatrixInList(5,4,temp1,temp2,&cpu_time))
+ {
+ return 1;
+ }
+
+ //Getting Tolerance Value
+ if(getFixedSizeDoubleMatrixInList(5,6,temp1,temp2,&tol_val))
+ {
+ return 1;
+ }
+
+
+ //Initialization of parameters
+ nVars=x1_rows;
+ nCons=0;
+
+ // Starting Ipopt
+
+ SmartPtr<minbndNLP> Prob = new minbndNLP(nVars,nCons,varLB,varUB);
+
+ SmartPtr<IpoptApplication> app = IpoptApplicationFactory();
+ app->RethrowNonIpoptException(true);
+
+ ////////// Managing the parameters //////////
+
+ app->Options()->SetNumericValue("tol", *tol_val);
+ app->Options()->SetIntegerValue("max_iter", (int)*max_iter);
+ app->Options()->SetNumericValue("max_cpu_time", *cpu_time);
+
+ ///////// Initialize the IpoptApplication and process the options /////////
+ ApplicationReturnStatus status;
+ status = app->Initialize();
+ if (status != Solve_Succeeded) {
+ sciprint("\n*** Error during initialization!\n");
+ return (int) status;
+ }
+ // Ask Ipopt to solve the problem
+ status = app->OptimizeTNLP(Prob);
+
+ //Get the solve statistics
+ cpuTime = app->Statistics()->TotalCPUTime();
+ app->Statistics()->NumberOfEvaluations(int_fobj_eval, int_constr_eval, int_fobj_grad_eval, int_constr_jac_eval, int_hess_eval);
+ app->Statistics()->Infeasibilities(dual_inf, constr_viol, complementarity, kkt_error);
+ rstatus = Prob->returnStatus();
+
+ ////////// Manage the output argument //////////
+
+
+ fX = Prob->getX();
+ ObjVal = Prob->getObjVal();
+ iteration = Prob->iterCount();
+ fobj_eval=(double)int_fobj_eval;
+ fZl = Prob->getZl();
+ fZu = Prob->getZu();
+
+ if (returnDoubleMatrixToScilab(1, 1, nVars, fX))
+ {
+ return 1;
+ }
+
+ if (returnDoubleMatrixToScilab(2, 1, 1, &ObjVal))
+ {
+ return 1;
+ }
+
+ if (returnIntegerMatrixToScilab(3, 1, 1, &rstatus))
+ {
+ return 1;
+ }
+
+ if (returnDoubleMatrixToScilab(4, 1, 1, &iteration))
+ {
+ return 1;
+ }
+
+ if (returnDoubleMatrixToScilab(5, 1, 1, &cpuTime))
+ {
+ return 1;
+ }
+
+ if (returnDoubleMatrixToScilab(6, 1, 1, &fobj_eval))
+ {
+ return 1;
+ }
+
+ if (returnDoubleMatrixToScilab(7, 1, 1, &dual_inf))
+ {
+ return 1;
+ }
+
+ if (returnDoubleMatrixToScilab(8, 1, nVars, fZl))
+ {
+ return 1;
+ }
+
+ if (returnDoubleMatrixToScilab(9, 1, nVars, fZu))
+ {
+ return 1;
+ }
+
+
+ return 0;
+}
+}
diff --git a/sci_gateway/cpp/sci_ipoptfmincon.cpp b/sci_gateway/cpp/sci_ipoptfmincon.cpp
new file mode 100644
index 0000000..551af41
--- /dev/null
+++ b/sci_gateway/cpp/sci_ipoptfmincon.cpp
@@ -0,0 +1,273 @@
+// Copyright (C) 2015 - IIT Bombay - FOSSEE
+//
+// Author: R.Vidyadhar & Vignesh Kannan
+// Organization: FOSSEE, IIT Bombay
+// Email: rvidhyadar@gmail.com & vignesh2496@gmail.com
+// This file must be used under the terms of the CeCILL.
+// This source file is licensed as described in the file COPYING, which
+// you should have received as part of this distribution. The terms
+// are also available at
+// http://www.cecill.info/licences/Licence_CeCILL_V2-en.txt
+
+
+#include "sci_iofunc.hpp"
+#include "IpIpoptApplication.hpp"
+#include "minconNLP.hpp"
+#include <IpSolveStatistics.hpp>
+
+extern "C"
+{
+#include <api_scilab.h>
+#include <Scierror.h>
+#include <BOOL.h>
+#include <localization.h>
+#include <sciprint.h>
+#include <iostream>
+
+using namespace std;
+
+int sci_solveminconp(char *fname)
+{
+ using namespace Ipopt;
+
+ CheckInputArgument(pvApiCtx, 20, 20);
+ CheckOutputArgument(pvApiCtx, 12, 12);
+
+ // Error management variable
+ SciErr sciErr;
+
+ //Function pointers, input matrix(Starting point) pointer, flag variable
+ int* funptr=NULL;
+ int* gradhesptr=NULL;
+ double *x0ptr=NULL, *lbptr=NULL, *ubptr=NULL,*Aptr=NULL, *bptr=NULL, *Aeqptr=NULL, *beqptr=NULL;
+ double flag1=0,flag2=0,flag3=0,nonlinCon=0,nonlinIneqCon=0;
+
+
+ // Input arguments
+ double *cpu_time=NULL,*max_iter=NULL;
+ static unsigned int nVars = 0,nCons = 0;
+ unsigned int temp1 = 0,temp2 = 0, iret = 0;
+ int x0_rows=0, x0_cols=0, lb_rows=0, lb_cols=0, ub_rows=0, ub_cols=0, A_rows=0, A_cols=0, b_rows=0, b_cols=0, Aeq_rows=0, Aeq_cols=0, beq_rows=0, beq_cols=0;
+
+ // Output arguments
+ double *fX = NULL, ObjVal=0,iteration=0,cpuTime=0,fobj_eval=0;
+ double dual_inf, constr_viol, complementarity, kkt_error;
+ double *fGrad = NULL;
+ double *fHess = NULL;
+ double *fLambda = NULL;
+ double *fZl=NULL;
+ double *fZu=NULL;
+ int rstatus = 0;
+ int int_fobj_eval, int_constr_eval, int_fobj_grad_eval, int_constr_jac_eval, int_hess_eval;
+
+ ////////// Manage the input argument //////////
+
+ //Objective Function
+ if(getFunctionFromScilab(1,&funptr))
+ {
+ return 1;
+ }
+
+ //Function for gradient and hessian
+ if(getFunctionFromScilab(2,&gradhesptr))
+ {
+ return 1;
+ }
+
+ //x0(starting point) matrix from scilab
+ if(getDoubleMatrixFromScilab(18, &x0_rows, &x0_cols, &x0ptr))
+ {
+ return 1;
+ }
+
+ //Getting number of iterations
+ if(getFixedSizeDoubleMatrixInList(19,2,temp1,temp2,&max_iter))
+ {
+ return 1;
+ }
+
+ //Getting Cpu Time
+ if(getFixedSizeDoubleMatrixInList(19,4,temp1,temp2,&cpu_time))
+ {
+ return 1;
+ }
+
+ //Getting matrix representing linear inequality constraints
+ if(getDoubleMatrixFromScilab(3, &A_rows, &A_cols, &Aptr))
+ {
+ return 1;
+ }
+
+ //Getting matrix representing bounds of linear inequality constraints
+ if(getDoubleMatrixFromScilab(4, &b_rows, &b_cols, &bptr))
+ {
+ return 1;
+ }
+
+ //Getting matrix representing linear equality constraints
+ if(getDoubleMatrixFromScilab(5, &Aeq_rows, &Aeq_cols, &Aeqptr))
+ {
+ return 1;
+ }
+
+	//Getting matrix representing bounds of linear equality constraints
+ if(getDoubleMatrixFromScilab(6, &beq_rows, &beq_cols, &beqptr))
+ {
+ return 1;
+ }
+
+	//Getting lower bounds of the variables
+ if(getDoubleMatrixFromScilab(7, &lb_rows, &lb_cols, &lbptr))
+ {
+ return 1;
+ }
+
+	//Getting upper bounds of the variables
+ if(getDoubleMatrixFromScilab(8, &ub_rows, &ub_cols, &ubptr))
+ {
+ return 1;
+ }
+
+ //Number of non-linear constraints
+ if(getDoubleFromScilab(9, &nonlinCon))
+ {
+ return 1;
+ }
+
+ //Number of non-linear inequality constraints
+ if(getDoubleFromScilab(10, &nonlinIneqCon))
+ {
+ return 1;
+ }
+
+ //Getting the required flag variables
+
+ if(getDoubleFromScilab(12, &flag1))
+ {
+ return 1;
+ }
+
+ if(getDoubleFromScilab(14, &flag2))
+ {
+ return 1;
+ }
+
+ if(getDoubleFromScilab(16, &flag3))
+ {
+ return 1;
+ }
+
+ //Number of variables and constraints
+ nVars = x0_cols;
+ nCons = A_rows + Aeq_rows + nonlinCon;
+
+
+ // Starting Ipopt
+
+ SmartPtr<minconNLP> Prob = new minconNLP(nVars, nCons, x0ptr, Aptr, bptr, Aeqptr, beqptr, A_rows, A_cols, b_rows, b_cols, Aeq_rows, Aeq_cols, beq_rows, beq_cols, lbptr, ubptr, nonlinCon, nonlinIneqCon, flag1, flag2, flag3);
+ SmartPtr<IpoptApplication> app = IpoptApplicationFactory();
+ app->RethrowNonIpoptException(true);
+
+ ////////// Managing the parameters //////////
+
+ app->Options()->SetNumericValue("tol", 1e-7);
+ app->Options()->SetIntegerValue("max_iter", (int)*max_iter);
+ app->Options()->SetNumericValue("max_cpu_time", *cpu_time);
+
+ ///////// Initialize the IpoptApplication and process the options /////////
+ ApplicationReturnStatus status;
+ status = app->Initialize();
+ if (status != Solve_Succeeded)
+ {
+ sciprint("\n*** Error during initialization!\n");
+ return (int) status;
+ }
+
+ // Ask Ipopt to solve the problem
+ status = app->OptimizeTNLP(Prob);
+
+ //Get the solve statistics
+ cpuTime = app->Statistics()->TotalCPUTime();
+ app->Statistics()->NumberOfEvaluations(int_fobj_eval, int_constr_eval, int_fobj_grad_eval, int_constr_jac_eval, int_hess_eval);
+ app->Statistics()->Infeasibilities(dual_inf, constr_viol, complementarity, kkt_error);
+ rstatus = Prob->returnStatus();
+ fobj_eval=(double)int_fobj_eval;
+
+ ////////// Manage the output argument //////////
+
+ fX = Prob->getX();
+ fGrad = Prob->getGrad();
+ fHess = Prob->getHess();
+ fLambda = Prob->getLambda();
+ fZl = Prob->getZl();
+ fZu = Prob->getZu();
+ ObjVal = Prob->getObjVal();
+ iteration = Prob->iterCount();
+
+ if (returnDoubleMatrixToScilab(1, 1, nVars, fX))
+ {
+ return 1;
+ }
+
+ if (returnDoubleMatrixToScilab(2, 1, 1, &ObjVal))
+ {
+ return 1;
+ }
+
+ if (returnIntegerMatrixToScilab(3, 1, 1, &rstatus))
+ {
+ return 1;
+ }
+
+ if (returnDoubleMatrixToScilab(4, 1, 1, &iteration))
+ {
+ return 1;
+ }
+
+ if (returnDoubleMatrixToScilab(5, 1, 1, &cpuTime))
+ {
+ return 1;
+ }
+
+ if (returnDoubleMatrixToScilab(6, 1, 1, &fobj_eval))
+ {
+ return 1;
+ }
+
+ if (returnDoubleMatrixToScilab(7, 1, 1, &dual_inf))
+ {
+ return 1;
+ }
+
+ if (returnDoubleMatrixToScilab(8, 1, nCons, fLambda))
+ {
+ return 1;
+ }
+
+ if (returnDoubleMatrixToScilab(9, 1, nVars, fZl))
+ {
+ return 1;
+ }
+
+ if (returnDoubleMatrixToScilab(10, 1, nVars, fZu))
+ {
+ return 1;
+ }
+
+ if (returnDoubleMatrixToScilab(11, 1, nVars, fGrad))
+ {
+ return 1;
+ }
+
+ if (returnDoubleMatrixToScilab(12, 1, nVars*nVars, fHess))
+ {
+ return 1;
+ }
+
+ // As the SmartPtrs go out of scope, the reference count
+ // will be decremented and the objects will automatically
+ // be deleted.*/
+
+ return 0;
+}
+}
diff --git a/sci_gateway/cpp/sci_ipoptfminunc.cpp b/sci_gateway/cpp/sci_ipoptfminunc.cpp
new file mode 100644
index 0000000..19c59ac
--- /dev/null
+++ b/sci_gateway/cpp/sci_ipoptfminunc.cpp
@@ -0,0 +1,201 @@
+// Copyright (C) 2015 - IIT Bombay - FOSSEE
+//
+// Author: R.Vidyadhar & Vignesh Kannan
+// Organization: FOSSEE, IIT Bombay
+// Email: rvidhyadar@gmail.com & vignesh2496@gmail.com
+// This file must be used under the terms of the CeCILL.
+// This source file is licensed as described in the file COPYING, which
+// you should have received as part of this distribution. The terms
+// are also available at
+// http://www.cecill.info/licences/Licence_CeCILL_V2-en.txt
+
+
+#include "sci_iofunc.hpp"
+#include "IpIpoptApplication.hpp"
+#include "minuncNLP.hpp"
+#include <IpSolveStatistics.hpp>
+
+extern "C"
+{
+#include <api_scilab.h>
+#include <Scierror.h>
+#include <BOOL.h>
+#include <localization.h>
+#include <sciprint.h>
+#include <iostream>
+
+using namespace std;
+
+int sci_solveminuncp(char *fname)
+{
+ using namespace Ipopt;
+
+ CheckInputArgument(pvApiCtx, 8, 8);
+ CheckOutputArgument(pvApiCtx, 9, 9);
+
+ // Error management variable
+ SciErr sciErr;
+
+ //Function pointers, input matrix(Starting point) pointer, flag variable
+ int* funptr=NULL;
+ int* gradhesptr=NULL;
+ double* x0ptr=NULL;
+ double flag1,flag2;
+
+
+ // Input arguments
+ double *cpu_time=NULL,*max_iter=NULL;
+ static unsigned int nVars = 0,nCons = 0;
+ unsigned int temp1 = 0,temp2 = 0, iret = 0;
+ int x0_rows, x0_cols;
+
+ // Output arguments
+ double *fX = NULL, ObjVal=0,iteration=0,cpuTime=0,fobj_eval=0;
+ double dual_inf, constr_viol, complementarity, kkt_error;
+ double *fGrad= NULL;
+ double *fHess= NULL;
+ int rstatus = 0;
+ int int_fobj_eval, int_constr_eval, int_fobj_grad_eval, int_constr_jac_eval, int_hess_eval;
+
+ ////////// Manage the input argument //////////
+
+ //Objective Function
+ if(getFunctionFromScilab(1,&funptr))
+ {
+ return 1;
+ }
+
+ //Function for gradient and hessian
+ if(getFunctionFromScilab(2,&gradhesptr))
+ {
+ return 1;
+ }
+
+ //Flag for Gradient from Scilab
+ if(getDoubleFromScilab(3, &flag1))
+ {
+ return 1;
+ }
+
+ //Flag for Hessian from Scilab
+ if(getDoubleFromScilab(5, &flag2))
+ {
+ return 1;
+ }
+
+ //x0(starting point) matrix from scilab
+ if(getDoubleMatrixFromScilab(7, &x0_rows, &x0_cols, &x0ptr))
+ {
+ return 1;
+ }
+
+ //Getting number of iterations
+ if(getFixedSizeDoubleMatrixInList(8,2,temp1,temp2,&max_iter))
+ {
+ return 1;
+ }
+
+ //Getting Cpu Time
+ if(getFixedSizeDoubleMatrixInList(8,4,temp1,temp2,&cpu_time))
+ {
+ return 1;
+ }
+
+
+ //Initialization of parameters
+ nVars=x0_cols;
+ nCons=0;
+
+ // Starting Ipopt
+
+ SmartPtr<minuncNLP> Prob = new minuncNLP(nVars, nCons, x0ptr, flag1, flag2);
+ SmartPtr<IpoptApplication> app = IpoptApplicationFactory();
+ app->RethrowNonIpoptException(true);
+
+ ////////// Managing the parameters //////////
+
+ app->Options()->SetNumericValue("tol", 1e-7);
+ app->Options()->SetIntegerValue("max_iter", (int)*max_iter);
+ app->Options()->SetNumericValue("max_cpu_time", *cpu_time);
+
+ ///////// Initialize the IpoptApplication and process the options /////////
+ ApplicationReturnStatus status;
+ status = app->Initialize();
+ if (status != Solve_Succeeded)
+ {
+ sciprint("\n*** Error during initialization!\n");
+ return (int) status;
+ }
+ // Ask Ipopt to solve the problem
+
+ status = app->OptimizeTNLP(Prob);
+
+ cpuTime = app->Statistics()->TotalCPUTime();
+
+ app->Statistics()->NumberOfEvaluations(int_fobj_eval, int_constr_eval, int_fobj_grad_eval, int_constr_jac_eval, int_hess_eval);
+
+ app->Statistics()->Infeasibilities(dual_inf, constr_viol, complementarity, kkt_error);
+
+ rstatus = Prob->returnStatus();
+
+ ////////// Manage the output argument //////////
+
+ fX = Prob->getX();
+ fGrad = Prob->getGrad();
+ fHess = Prob->getHess();
+ ObjVal = Prob->getObjVal();
+ iteration = Prob->iterCount();
+ fobj_eval = (double)int_fobj_eval;
+
+ if (returnDoubleMatrixToScilab(1, 1, nVars, fX))
+ {
+ return 1;
+ }
+
+ if (returnDoubleMatrixToScilab(2, 1, 1, &ObjVal))
+ {
+ return 1;
+ }
+
+ if (returnIntegerMatrixToScilab(3, 1, 1, &rstatus))
+ {
+ return 1;
+ }
+
+ if (returnDoubleMatrixToScilab(4, 1, 1, &iteration))
+ {
+ return 1;
+ }
+
+ if (returnDoubleMatrixToScilab(5, 1, 1, &cpuTime))
+ {
+ return 1;
+ }
+
+ if (returnDoubleMatrixToScilab(6, 1, 1, &fobj_eval))
+ {
+ return 1;
+ }
+
+ if (returnDoubleMatrixToScilab(7, 1, 1, &dual_inf))
+ {
+ return 1;
+ }
+
+ if (returnDoubleMatrixToScilab(8, 1, nVars, fGrad))
+ {
+ return 1;
+ }
+
+ if (returnDoubleMatrixToScilab(9, 1, nVars*nVars, fHess))
+ {
+ return 1;
+ }
+
+ // As the SmartPtrs go out of scope, the reference count
+ // will be decremented and the objects will automatically
+ // be deleted.*/
+
+ return 0;
+}
+}
diff --git a/sci_gateway/cpp/sci_minbndNLP.cpp b/sci_gateway/cpp/sci_minbndNLP.cpp
new file mode 100644
index 0000000..9a7024e
--- /dev/null
+++ b/sci_gateway/cpp/sci_minbndNLP.cpp
@@ -0,0 +1,353 @@
+// Copyright (C) 2015 - IIT Bombay - FOSSEE
+//
+// Author: R.Vidyadhar & Vignesh Kannan
+// Organization: FOSSEE, IIT Bombay
+// Email: rvidhyadar@gmail.com & vignesh2496@gmail.com
+// This file must be used under the terms of the CeCILL.
+// This source file is licensed as described in the file COPYING, which
+// you should have received as part of this distribution. The terms
+// are also available at
+// http://www.cecill.info/licences/Licence_CeCILL_V2-en.txt
+
+
+#include "minbndNLP.hpp"
+#include "IpIpoptData.hpp"
+#include "sci_iofunc.hpp"
+
+extern "C"
+{
+#include <api_scilab.h>
+#include <Scierror.h>
+#include <BOOL.h>
+#include <localization.h>
+#include <sciprint.h>
+#include <string.h>
+#include <assert.h>
+#include <iostream>
+
+using namespace std;
+using namespace Ipopt;
+
+minbndNLP::~minbndNLP()
+{
+ free(finalX_);
+ free(finalZu_);
+ free(finalZl_);
+}
+
+//get NLP info such as number of variables,constraints,no.of elements in jacobian and hessian to allocate memory
+bool minbndNLP::get_nlp_info(Index& n, Index& m, Index& nnz_jac_g, Index& nnz_h_lag, IndexStyleEnum& index_style)
+{
+ n=numVars_; // Number of variables
+ m=numConstr_; // Number of constraints
+ nnz_jac_g = 0; // No. of elements in Jacobian of constraints
+	nnz_h_lag = n*(n+1)/2;	// No. of elements in lower triangle of Hessian of the Lagrangian.
+ index_style=C_STYLE; // Index style of matrices
+
+ return true;
+}
+
+//get variable and constraint bound info
+bool minbndNLP::get_bounds_info(Index n, Number* x_l, Number* x_u, Index m, Number* g_l, Number* g_u)
+{
+ for(Index i=0;i<n;i++)
+ {
+ x_l[i]=varLB_[i]+0.0000001;
+ x_u[i]=varUB_[i]-0.0000001;
+ }
+
+ g_l=NULL;
+ g_u=NULL;
+
+ return true;
+}
+
+// return the value of the constraints: g(x)
+bool minbndNLP::eval_g(Index n, const Number* x, bool new_x, Index m, Number* g)
+{
+ g=NULL;
+ return true;
+}
+
+// return the structure or values of the jacobian
+bool minbndNLP::eval_jac_g(Index n, const Number* x, bool new_x,Index m, Index nele_jac, Index* iRow, Index *jCol,Number* values)
+{
+ if (values == NULL)
+ {
+ // return the structure of the jacobian of the constraints
+ iRow=NULL;
+ jCol=NULL;
+ }
+ else
+ {
+ values=NULL;
+ }
+
+ return true;
+}
+
+//get value of objective function at vector x
+bool minbndNLP::eval_f(Index n, const Number* x, bool new_x, Number& obj_value)
+{
+ int* funptr=NULL;
+ double check;
+ Index i;
+ if(getFunctionFromScilab(1,&funptr))
+ {
+ return 1;
+ }
+ char name[20]="f";
+ double obj=0;
+ double *xNew=const_cast<double*>(x); // Scilab API expects a non-const double*
+ createMatrixOfDouble(pvApiCtx, 3, 1, numVars_, xNew);
+ int positionFirstElementOnStackForScilabFunction = 3;
+ int numberOfRhsOnScilabFunction = 1;
+ int numberOfLhsOnScilabFunction = 2;
+ int pointerOnScilabFunction = *funptr;
+
+ C2F(scistring)(&positionFirstElementOnStackForScilabFunction,name,
+ &numberOfLhsOnScilabFunction,
+ &numberOfRhsOnScilabFunction,(unsigned long)strlen(name));
+ if(getDoubleFromScilab(4,&check))
+ {
+ return true;
+ }
+ if (check==1)
+ {
+
+ return true;
+ }
+ else
+ {
+ if(getDoubleFromScilab(3,&obj))
+ {
+ sciprint("No obj value");
+ return 1;
+ }
+ obj_value=obj;
+ return true;
+ }
+}
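+
+// Callback protocol used above (and in eval_grad_f/eval_h below): the current
+// iterate x is written to the Scilab stack at position 3, C2F(scistring) invokes
+// the named Scilab wrapper, and the results are read back from the same slots
+// (position 3: value, position 4: error flag). The Scilab wrapper is assumed to
+// look roughly like
+//   function [y, check] = f(x)
+//       y = <objective evaluated at x>; check = 0;   // check = 1 marks an undefined point
+//   endfunction
+// (sketch only; the actual wrapper is defined on the Scilab side).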
+
+//get value of gradient of objective function at vector x.
+bool minbndNLP::eval_grad_f(Index n, const Number* x, bool new_x, Number* grad_f)
+{
+ int* gradhessptr=NULL;
+ if(getFunctionFromScilab(2,&gradhessptr))
+ {
+ return 1;
+ }
+ double *xNew=const_cast<double*>(x);
+ Index i;
+ double t=1;
+ createMatrixOfDouble(pvApiCtx, 3, 1, numVars_, xNew);
+ createScalarDouble(pvApiCtx, 4,t);
+ int positionFirstElementOnStackForScilabFunction = 3;
+ int numberOfRhsOnScilabFunction = 2;
+ int numberOfLhsOnScilabFunction = 2;
+ int pointerOnScilabFunction = *gradhessptr;
+ char name[20]="gradhess";
+
+ C2F(scistring)(&positionFirstElementOnStackForScilabFunction,name,
+ &numberOfLhsOnScilabFunction,
+ &numberOfRhsOnScilabFunction,(unsigned long)strlen(name));
+
+
+ double* resg;
+ double check;
+ int x0_rows,x0_cols;
+
+ if(getDoubleFromScilab(4,&check))
+ {
+ return true;
+ }
+ if (check==1)
+ {
+ /*sciprint("Gradient is not defined at the point [");
+ for(i=0;i<numVars_;i++)
+ {
+ if(i==numVars_-1)
+ sciprint("%d",x[i]);
+ else
+ sciprint("%d,",x[i]);
+ }
+ sciprint("], So the Point is skipped by IPopt during Iterations");*/
+ return true;
+ }
+ else
+ {
+ if(getDoubleMatrixFromScilab(3, &x0_rows, &x0_cols, &resg))
+ {
+ return true;
+ }
+
+ for(i=0;i<numVars_;i++)
+ {
+ grad_f[i]=resg[i];
+ }
+ return true;
+ }
+}
+
+// This method sets the starting point for the optimization; every variable is initialised to 0.
+bool minbndNLP::get_starting_point(Index n, bool init_x, Number* x,bool init_z, Number* z_L, Number* z_U,Index m, bool init_lambda,Number* lambda)
+{
+ Index i;
+ for(i=0;i<n;i++)
+ x[i]=0;
+
+ return true;
+}
+
+/*
+ * Return either the sparsity structure of the Hessian of the Lagrangian,
+ * or the values of the Hessian of the Lagrangian for the given values for
+ * x,lambda,obj_factor.
+*/
+
+bool minbndNLP::eval_h(Index n, const Number* x, bool new_x,Number obj_factor, Index m, const Number* lambda,bool new_lambda, Index nele_hess, Index* iRow,Index* jCol, Number* values)
+{
+
+
+ if (values==NULL)
+ {
+ Index idx=0;
+ for (Index row = 0; row < numVars_; row++)
+ {
+ for (Index col = 0; col <= row; col++)
+ {
+ iRow[idx] = row;
+ jCol[idx] = col;
+ idx++;
+ }
+ }
+ }
+ else
+ {
+ int* gradhessptr=NULL;
+ if(getFunctionFromScilab(2,&gradhessptr))
+ {
+ return 1;
+ }
+
+ double *xNew=const_cast<double*>(x);
+ Index i;
+ double t=2;
+
+ createMatrixOfDouble(pvApiCtx, 3, 1, numVars_, xNew);
+ createScalarDouble(pvApiCtx, 4,t);
+ int positionFirstElementOnStackForScilabFunction = 3;
+ int numberOfRhsOnScilabFunction = 2;
+ int numberOfLhsOnScilabFunction = 2;
+ int pointerOnScilabFunction = *gradhessptr;
+ char name[20]="gradhess";
+
+ C2F(scistring)(&positionFirstElementOnStackForScilabFunction,name,
+ &numberOfLhsOnScilabFunction,
+ &numberOfRhsOnScilabFunction,(unsigned long)strlen(name));
+
+ double* resh;
+ double check;
+ int x0_rows,x0_cols;
+
+ if(getDoubleFromScilab(4,&check))
+ {
+ return true;
+ }
+ if (check==1)
+ {
+ /*sciprint("Hessian is not defined at the point [");
+ for(i=0;i<numVars_;i++)
+ {
+ if(i==numVars_-1)
+ sciprint("%d",x[i]);
+ else
+ sciprint("%d,",x[i]);
+ }
+ sciprint("], So the Point is skipped by IPopt during Iterations");*/
+ return true;
+ }
+ else
+ {
+ if(getDoubleMatrixFromScilab(3, &x0_rows, &x0_cols, &resh))
+ {
+ sciprint("No results");
+ return 1;
+ }
+ Index index=0;
+ for (Index row=0;row < numVars_ ;++row)
+ {
+ for (Index col=0; col <= row; ++col)
+ {
+ values[index++]=obj_factor*(resh[numVars_*row+col]);
+ }
+ }
+ }
+
+ }
+
+ return true;
+}
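+
+// Packing example (comment only): for n = 3 the values[] array filled above holds
+// the lower triangle of obj_factor * H(x) row by row,
+//   values = obj_factor * [H00, H10, H11, H20, H21, H22],
+// matching the (iRow, jCol) pattern returned when values == NULL.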
+
+
+void minbndNLP::finalize_solution(SolverReturn status,Index n, const Number* x, const Number* z_L, const Number* z_U,Index m, const Number* g, const Number* lambda, Number obj_value,const IpoptData* ip_data,IpoptCalculatedQuantities* ip_cq)
+{
+ finalX_ = (double*)malloc(sizeof(double) * numVars_ * 1);
+ for (Index i=0; i<numVars_; i++)
+ {
+ finalX_[i] = x[i];
+ }
+
+ finalZl_ = (double*)malloc(sizeof(double) * numVars_ * 1);
+ for (Index i=0; i<n; i++)
+ {
+ finalZl_[i] = z_L[i];
+ }
+
+ finalZu_ = (double*)malloc(sizeof(double) * numVars_ * 1);
+ for (Index i=0; i<n; i++)
+ {
+ finalZu_[i] = z_U[i];
+ }
+
+ finalObjVal_ = obj_value;
+ status_ = status;
+ if (status_ == 0 || status_ == 1 || status_ == 2)
+ {
+ iter_ = ip_data->iter_count();
+ }
+}
+
+
+const double * minbndNLP::getX()
+{
+ return finalX_;
+}
+
+double minbndNLP::getObjVal()
+{
+ return finalObjVal_;
+}
+
+const double * minbndNLP::getZl()
+{
+ return finalZl_;
+}
+
+const double * minbndNLP::getZu()
+{
+ return finalZu_;
+}
+
+double minbndNLP::iterCount()
+{
+ return (double)iter_;
+}
+
+int minbndNLP::returnStatus()
+{
+ return status_;
+}
+
+}
+
diff --git a/sci_gateway/cpp/sci_minconNLP.cpp b/sci_gateway/cpp/sci_minconNLP.cpp
new file mode 100644
index 0000000..2c6d6af
--- /dev/null
+++ b/sci_gateway/cpp/sci_minconNLP.cpp
@@ -0,0 +1,797 @@
+// Copyright (C) 2015 - IIT Bombay - FOSSEE
+//
+// Author: R.Vidyadhar & Vignesh Kannan
+// Organization: FOSSEE, IIT Bombay
+// Email: rvidhyadar@gmail.com & vignesh2496@gmail.com
+// This file must be used under the terms of the CeCILL.
+// This source file is licensed as described in the file COPYING, which
+// you should have received as part of this distribution. The terms
+// are also available at
+// http://www.cecill.info/licences/Licence_CeCILL_V2-en.txt
+
+
+#include "minconNLP.hpp"
+#include "IpIpoptData.hpp"
+#include "sci_iofunc.hpp"
+
+extern "C"
+{
+
+#include <api_scilab.h>
+#include <Scierror.h>
+#include <BOOL.h>
+#include <localization.h>
+#include <sciprint.h>
+#include <string.h>
+#include <assert.h>
+#include <iostream>
+
+using namespace std;
+using namespace Ipopt;
+
+minconNLP::~minconNLP()
+{
+ free(finalX_);
+ free(finalGradient_);
+ free(finalHessian_);
+ free(finalZu_);
+ free(finalZl_);
+ free(finalLambda_);
+}
+
+//get NLP info such as the number of variables and constraints, and the number of non-zero elements in the Jacobian and Hessian, so that memory can be allocated
+bool minconNLP::get_nlp_info(Index& n, Index& m, Index& nnz_jac_g, Index& nnz_h_lag, IndexStyleEnum& index_style)
+{
+ finalGradient_ = (double*)malloc(sizeof(double) * numVars_ * 1);
+ finalHessian_ = (double*)malloc(sizeof(double) * numVars_ * numVars_);
+
+ n=numVars_; // Number of variables
+ m=numConstr_; // Number of constraints
+
+ nnz_jac_g = n*m; // No. of elements in Jacobian of constraints
+ nnz_h_lag = n*(n+1)/2; // No. of elements in lower triangle of Hessian of the Lagrangian.
+
+ index_style=C_STYLE; // Index style of matrices
+ return true;
+}
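+
+// Note: the constraint Jacobian is treated as dense, so nnz_jac_g = n*m.
+// For example, 4 variables and 3 constraints give 12 Jacobian entries and
+// 4*(4+1)/2 = 10 Hessian entries in the lower triangle.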
+
+//get variable and constraint bound info
+bool minconNLP::get_bounds_info(Index n, Number* x_l, Number* x_u, Index m, Number* g_l, Number* g_u)
+{
+ unsigned int i;
+
+ //assigning bounds for the variables
+ for(i=0;i<n;i++)
+ {
+ x_l[i]=varLB_[i];
+ x_u[i]=varUB_[i];
+ }
+
+ if(m==0)
+ {
+ g_l=NULL;
+ g_u=NULL;
+ }
+
+ else
+ {
+ unsigned int c=0;
+
+ //bounds of non-linear inequality constraints
+ for(i=0;i<nonlinIneqCon_;i++)
+ {
+ g_l[c]=-1.0e19;
+ g_u[c]=0;
+ c++;
+ }
+
+ //bounds of non-linear equality constraints
+ for(i=0;i<nonlinCon_-nonlinIneqCon_;i++)
+ {
+ g_l[c]=g_u[c]=0;
+ c++;
+ }
+
+ //bounds of linear equality constraints
+ for(i=0;i<Aeqrows_;i++)
+ {
+ g_l[c]=g_u[c]=beq_[i];
+ c++;
+ }
+
+ //bounds of linear inequality constraints
+ for(i=0;i<Arows_;i++)
+ {
+ g_l[c]=-1.0e19;
+ g_u[c]=b_[i];
+
+ c++;
+ }
+
+ }
+
+ return true;
+}
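+
+// Constraint rows are ordered as: non-linear inequalities (g(x) <= 0), then
+// non-linear equalities (g(x) = 0), then linear equalities (Aeq*x = beq), then
+// linear inequalities (A*x <= b). -1.0e19 serves as IPOPT's "minus infinity"
+// for the one-sided rows, e.g. row i of A*x <= b gets bounds (-1e19, b(i)).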
+
+// return the value of the constraints: g(x)
+bool minconNLP::eval_g(Index n, const Number* x, bool new_x, Index m, Number* g)
+{
+ // return the value of the constraints: g(x)
+
+ unsigned int i;
+ unsigned int j;
+
+ if(m==0)
+ g=NULL;
+
+ else
+ {
+ unsigned int c=0;
+
+ //value of non-linear constraints
+ if(nonlinCon_!=0)
+ {
+ int* constr=NULL;
+ if(getFunctionFromScilab(11,&constr))
+ {
+ return 1;
+ }
+ char name[20]="addnlc1";
+ double *xNew=const_cast<double*>(x); // Scilab API expects a non-const double*
+ double check;
+ createMatrixOfDouble(pvApiCtx, 18, 1, numVars_, xNew);
+ int positionFirstElementOnStackForScilabFunction = 18;
+ int numberOfRhsOnScilabFunction = 1;
+ int numberOfLhsOnScilabFunction = 2;
+ int pointerOnScilabFunction = *constr;
+
+ C2F(scistring)(&positionFirstElementOnStackForScilabFunction,name,
+ &numberOfLhsOnScilabFunction,
+ &numberOfRhsOnScilabFunction,(unsigned long)strlen(name));
+
+ double* resc;
+ int xC_rows,xC_cols;
+ if(getDoubleFromScilab(19,&check))
+ {
+ return true;
+ }
+ if (check==1)
+ {
+ return true;
+ }
+ else
+ {
+ if(getDoubleMatrixFromScilab(18, &xC_rows, &xC_cols, &resc))
+ {
+ sciprint("No results");
+ return 1;
+
+ }
+
+ for(i=0;i<nonlinCon_;i++)
+ {
+ g[c]=resc[i];
+ c++;
+ }
+ }
+ }
+
+ //value of linear equality constraints
+ for(i=0;i<Aeqrows_;i++)
+ {
+ g[c]=0;
+ for(j=0;j<Aeqcols_;j++)
+ g[c] += Aeq_[j*Aeqrows_+i]*x[j];
+ c++;
+ }
+
+ //value of linear inequality constraints
+ for(i=0;i<Arows_;i++)
+ {
+ g[c]=0;
+ for(j=0;j<Acols_;j++)
+ g[c] += A_[j*Arows_+i]*x[j];
+ c++;
+ }
+
+ }
+
+ return true;
+}
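+
+// Note: A_ and Aeq_ are stored column-major (Scilab convention), so element (i,j)
+// of A is A_[j*Arows_+i]; each linear row i is evaluated above as the dot product
+// sum over j of A(i,j)*x[j].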
+
+// return the structure or values of the jacobian
+bool minconNLP::eval_jac_g(Index n, const Number* x, bool new_x,Index m, Index nele_jac, Index* iRow, Index *jCol,Number* values)
+{
+ if (values == NULL)
+ {
+ if(m==0)// return the structure of the jacobian of the constraints
+ {
+ iRow=NULL;
+ jCol=NULL;
+ }
+
+ else
+ {
+ unsigned int i,j,idx=0;
+ for(i=0;i<m;i++)
+ for(j=0;j<n;j++)
+ {
+ iRow[idx]=i;
+ jCol[idx]=j;
+ idx++;
+ }
+ }
+ }
+
+ else
+ {
+ if(m==0)
+ values=NULL;
+
+ else
+ {
+ unsigned int i,j,c=0;
+ double check;
+ //jacobian of non-linear constraints
+ if(nonlinCon_!=0)
+ {
+ if(flag3_==0)
+ {
+ int* gradhessptr=NULL;
+ if(getFunctionFromScilab(2,&gradhessptr))
+ {
+ return 1;
+ }
+ double *xNew=const_cast<double*>(x);
+ double t=3;
+ createMatrixOfDouble(pvApiCtx, 18, 1, numVars_, xNew);
+ createScalarDouble(pvApiCtx, 19,t);
+ int positionFirstElementOnStackForScilabFunction = 18;
+ int numberOfRhsOnScilabFunction = 2;
+ int numberOfLhsOnScilabFunction = 2;
+ int pointerOnScilabFunction = *gradhessptr;
+ char name[20]="gradhess";
+
+ C2F(scistring)(&positionFirstElementOnStackForScilabFunction,name,
+ &numberOfLhsOnScilabFunction,
+ &numberOfRhsOnScilabFunction,(unsigned long)strlen(name));
+
+ double* resj;
+ int xJ_rows,xJ_cols;
+ if(getDoubleFromScilab(19,&check))
+ {
+ return true;
+ }
+ if (check==1)
+ {
+ return true;
+ }
+ else
+ {
+ if(getDoubleMatrixFromScilab(18, &xJ_rows, &xJ_cols, &resj))
+ {
+ sciprint("No results");
+ return 1;
+ }
+
+ for(i=0;i<nonlinCon_;i++)
+ {
+ for(j=0;j<n;j++)
+ {
+ values[c] = resj[j*(int)nonlinCon_+i];
+ c++;
+ }
+ }
+ }
+ }
+
+ else
+ {
+ int* jacptr=NULL;
+ if(getFunctionFromScilab(17,&jacptr))
+ {
+ return 1;
+ }
+
+ double *xNew=const_cast<double*>(x);
+ createMatrixOfDouble(pvApiCtx, 18, 1, numVars_, xNew);
+ int positionFirstElementOnStackForScilabFunction = 18;
+ int numberOfRhsOnScilabFunction = 1;
+ int numberOfLhsOnScilabFunction = 2;
+ int pointerOnScilabFunction = *jacptr;
+ char name[20]="addcGrad1";
+
+ C2F(scistring)(&positionFirstElementOnStackForScilabFunction,name,
+ &numberOfLhsOnScilabFunction,
+ &numberOfRhsOnScilabFunction,(unsigned long)strlen(name));
+
+ double* resj;
+ int xJ_rows,xJ_cols;
+ if(getDoubleFromScilab(19,&check))
+ {
+ return true;
+ }
+ if (check==1)
+ {
+ return true;
+ }
+ else
+ {
+ if(getDoubleMatrixFromScilab(18, &xJ_rows, &xJ_cols, &resj))
+ {
+ sciprint("No results");
+ return 1;
+ }
+
+ for(i=0;i<nonlinCon_;i++)
+ for(j=0;j<n;j++)
+ {
+ values[c] = resj[j*(int)nonlinCon_+i];
+ c++;
+ }
+ }
+ }
+ }
+
+ //jacobian of linear equality constraints
+ for(i=0;i<Aeqrows_;i++)
+ {
+ for(j=0;j<Aeqcols_;j++)
+ {
+ values[c] = Aeq_[j*Aeqrows_+i];
+ c++;
+ }
+ }
+
+ //jacobian of linear inequality constraints
+ for(i=0;i<Arows_;i++)
+ {
+ for(j=0;j<Acols_;j++)
+ {
+ values[c] = A_[j*Arows_+i];
+ c++;
+ }
+ }
+
+ }
+ }
+
+ return true;
+}
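+
+// The Jacobian is returned dense and row by row: for each constraint i the n
+// partial derivatives are written consecutively into values[], matching the
+// (iRow, jCol) structure above. The matrix returned from Scilab (resj) is
+// column-major with nonlinCon_ rows, hence the resj[j*nonlinCon_ + i] indexing.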
+
+//get value of objective function at vector x
+bool minconNLP::eval_f(Index n, const Number* x, bool new_x, Number& obj_value)
+{
+ int* funptr=NULL;
+ if(getFunctionFromScilab(1,&funptr))
+ {
+ return 1;
+ }
+ char name[20]="f";
+ double obj=0;
+ double *xNew=const_cast<double*>(x);
+ double check;
+ createMatrixOfDouble(pvApiCtx, 18, 1, numVars_, xNew);
+ int positionFirstElementOnStackForScilabFunction = 18;
+ int numberOfRhsOnScilabFunction = 1;
+ int numberOfLhsOnScilabFunction = 2;
+ int pointerOnScilabFunction = *funptr;
+
+ C2F(scistring)(&positionFirstElementOnStackForScilabFunction,name,
+ &numberOfLhsOnScilabFunction,
+ &numberOfRhsOnScilabFunction,(unsigned long)strlen(name));
+
+ if(getDoubleFromScilab(19,&check))
+ {
+ return true;
+ }
+ if (check==1)
+ {
+ return true;
+ }
+ else
+ {
+ if(getDoubleFromScilab(18,&obj))
+ {
+ sciprint("No obj value");
+ return 1;
+ }
+ obj_value=obj;
+
+ return true;
+ }
+}
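+
+// Note: this gateway uses Scilab stack positions 18 and 19 for the temporary
+// input/output of the callback (instead of 3 and 4 in the bound-constrained
+// solver), presumably because the fmincon gateway already occupies the first
+// 17 input positions.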
+
+//get value of gradient of objective function at vector x.
+bool minconNLP::eval_grad_f(Index n, const Number* x, bool new_x, Number* grad_f)
+{
+ if (flag1_==0)
+ {
+ int* gradhessptr=NULL;
+ if(getFunctionFromScilab(2,&gradhessptr))
+ {
+ return 1;
+ }
+ double *xNew=const_cast<double*>(x);
+ double t=1;
+ createMatrixOfDouble(pvApiCtx, 18, 1, numVars_, xNew);
+ createScalarDouble(pvApiCtx, 19,t);
+ int positionFirstElementOnStackForScilabFunction = 18;
+ int numberOfRhsOnScilabFunction = 2;
+ int numberOfLhsOnScilabFunction = 2;
+ int pointerOnScilabFunction = *gradhessptr;
+ char name[20]="gradhess";
+
+ C2F(scistring)(&positionFirstElementOnStackForScilabFunction,name,
+ &numberOfLhsOnScilabFunction,
+ &numberOfRhsOnScilabFunction,(unsigned long)strlen(name));
+ }
+
+ else
+ {
+ int* gradptr=NULL;
+ if(getFunctionFromScilab(13,&gradptr))
+ {
+ return 1;
+ }
+ double *xNew=const_cast<double*>(x);
+ createMatrixOfDouble(pvApiCtx, 18, 1, numVars_, xNew);
+ int positionFirstElementOnStackForScilabFunction = 18;
+ int numberOfRhsOnScilabFunction = 1;
+ int numberOfLhsOnScilabFunction = 2;
+ int pointerOnScilabFunction = *gradptr;
+ char name[20]="fGrad1";
+
+ C2F(scistring)(&positionFirstElementOnStackForScilabFunction,name,
+ &numberOfLhsOnScilabFunction,
+ &numberOfRhsOnScilabFunction,(unsigned long)strlen(name));
+ }
+
+ double* resg;
+ double check;
+ int x0_rows,x0_cols;
+ if(getDoubleFromScilab(19,&check))
+ {
+ return true;
+ }
+ if (check==1)
+ {
+ return true;
+ }
+ else
+ {
+ if(getDoubleMatrixFromScilab(18, &x0_rows, &x0_cols, &resg))
+ {
+ sciprint("No results");
+ return 1;
+ }
+
+
+ Index i;
+ for(i=0;i<numVars_;i++)
+ {
+ grad_f[i]=resg[i];
+ finalGradient_[i]=resg[i];
+ }
+ }
+ return true;
+}
+
+// This method sets the initial values for the required vectors; x is initialised from the user-supplied starting point varGuess_.
+bool minconNLP::get_starting_point(Index n, bool init_x, Number* x,bool init_z, Number* z_L, Number* z_U,Index m, bool init_lambda,Number* lambda)
+{
+ assert(init_x == true);
+ assert(init_z == false);
+ assert(init_lambda == false);
+ if (init_x == true)
+ { //we need to set initial values for vector x
+ for (Index var=0;var<n;var++)
+ x[var]=varGuess_[var];
+ }
+
+ return true;
+}
+
+/*
+ * Return either the sparsity structure of the Hessian of the Lagrangian,
+ * or the values of the Hessian of the Lagrangian for the given values for
+ * x,lambda,obj_factor.
+*/
+
+bool minconNLP::eval_h(Index n, const Number* x, bool new_x,Number obj_factor, Index m, const Number* lambda,bool new_lambda, Index nele_hess, Index* iRow,Index* jCol, Number* values)
+{
+ if (values==NULL)
+ {
+ Index idx=0;
+ for (Index row = 0; row < numVars_; row++)
+ {
+ for (Index col = 0; col <= row; col++)
+ {
+ iRow[idx] = row;
+ jCol[idx] = col;
+ idx++;
+ }
+ }
+ }
+
+ else
+ {
+ double check;
+ //hessian of the objective function
+ if(flag2_==0)
+ {
+ int* gradhessptr=NULL;
+ if(getFunctionFromScilab(2,&gradhessptr))
+ {
+ return 1;
+ }
+ double *xNew=const_cast<double*>(x);
+ double t=2;
+ createMatrixOfDouble(pvApiCtx, 18, 1, numVars_, xNew);
+ createScalarDouble(pvApiCtx, 19,t);
+ int positionFirstElementOnStackForScilabFunction = 18;
+ int numberOfRhsOnScilabFunction = 2;
+ int numberOfLhsOnScilabFunction = 2;
+ int pointerOnScilabFunction = *gradhessptr;
+ char name[20]="gradhess";
+
+ C2F(scistring)(&positionFirstElementOnStackForScilabFunction,name,
+ &numberOfLhsOnScilabFunction,
+ &numberOfRhsOnScilabFunction,(unsigned long)strlen(name));
+
+ double* resTemph;
+ int x0_rows,x0_cols;
+ if(getDoubleFromScilab(19,&check))
+ {
+ return true;
+ }
+ if (check==1)
+ {
+ return true;
+ }
+ else
+ {
+ if(getDoubleMatrixFromScilab(18, &x0_rows, &x0_cols, &resTemph))
+ {
+ sciprint("No results");
+ return 1;
+ }
+
+ double* resh=(double*)malloc(sizeof(double)*n*n);
+ Index i;
+ for(i=0;i<numVars_*numVars_;i++)
+ {
+ resh[i]=resTemph[i];
+ }
+
+ //sum of hessians of constraints each multiplied by its own lambda factor
+ double* sum=(double*)malloc(sizeof(double)*n*n);
+ if(nonlinCon_!=0)
+ {
+
+ int* gradhessptr=NULL;
+ if(getFunctionFromScilab(2,&gradhessptr))
+ {
+ return 1;
+ }
+
+ double *xNew=const_cast<double*>(x);
+ double t=4;
+ createMatrixOfDouble(pvApiCtx, 18, 1, numVars_, xNew);
+ createScalarDouble(pvApiCtx, 19,t);
+ int positionFirstElementOnStackForScilabFunction = 18;
+ int numberOfRhsOnScilabFunction = 2;
+ int numberOfLhsOnScilabFunction = 2;
+ int pointerOnScilabFunction = *gradhessptr;
+ char name[20]="gradhess";
+
+ C2F(scistring)(&positionFirstElementOnStackForScilabFunction,name,
+ &numberOfLhsOnScilabFunction,
+ &numberOfRhsOnScilabFunction,(unsigned long)strlen(name));
+
+ double* resCh;
+ int xCh_rows,xCh_cols;
+ if(getDoubleFromScilab(19,&check))
+ {
+ return true;
+ }
+ if (check==1)
+ {
+ return true;
+ }
+ else
+ {
+ if(getDoubleMatrixFromScilab(18, &xCh_rows, &xCh_cols, &resCh))
+ {
+ sciprint("No results");
+ return 1;
+ }
+
+ Index j;
+
+ for(i=0;i<numVars_*numVars_;i++)
+ {
+ sum[i]=0;
+ for(j=0;j<nonlinCon_;j++)
+ sum[i]+=lambda[j]*resCh[i*(int)nonlinCon_+j];
+ }
+ }
+ }
+
+ else
+ {
+ for(i=0;i<numVars_*numVars_;i++)
+ sum[i]=0;
+ }
+
+ //computing the lagrangian
+ Index index=0;
+ for (Index row=0;row < numVars_ ;++row)
+ {
+ for (Index col=0; col <= row; ++col)
+ {
+ values[index++]=obj_factor*(resh[numVars_*row+col])+sum[numVars_*row+col];
+ }
+ }
+
+ free(resh);
+ free(sum);
+ }
+ }
+ else
+ {
+ int* hessptr=NULL;
+ if(getFunctionFromScilab(15,&hessptr))
+ {
+ return 1;
+ }
+ double *xNew=const_cast<double*>(x);
+ double *lambdaNew=const_cast<double*>(lambda);
+ double objfac=obj_factor;
+ createMatrixOfDouble(pvApiCtx, 18, 1, numVars_, xNew);
+ createScalarDouble(pvApiCtx, 19,objfac);
+ createMatrixOfDouble(pvApiCtx, 20, 1, numConstr_, lambdaNew);
+ int positionFirstElementOnStackForScilabFunction = 18;
+ int numberOfRhsOnScilabFunction = 3;
+ int numberOfLhsOnScilabFunction = 2;
+ int pointerOnScilabFunction = *hessptr;
+ char name[20]="lHess1";
+
+ C2F(scistring)(&positionFirstElementOnStackForScilabFunction,name,
+ &numberOfLhsOnScilabFunction,
+ &numberOfRhsOnScilabFunction,(unsigned long)strlen(name));
+
+ double* resCh;
+ int xCh_rows,xCh_cols;
+ if(getDoubleFromScilab(19,&check))
+ {
+ return true;
+ }
+ if (check==1)
+ {
+ return true;
+ }
+ else
+ {
+ if(getDoubleMatrixFromScilab(18, &xCh_rows, &xCh_cols, &resCh))
+ {
+ sciprint("No results");
+ return 1;
+ }
+
+ Index index=0;
+ for (Index row=0;row < numVars_ ;++row)
+ {
+ for (Index col=0; col <= row; ++col)
+ {
+ values[index++]=resCh[numVars_*row+col];
+ }
+ }
+ }
+ }
+
+
+ Index index=0;
+ for (Index row=0;row < numVars_ ;++row)
+ {
+ for (Index col=0; col <= row; ++col)
+ {
+ finalHessian_[n*row+col]=values[index++];
+ }
+ }
+
+ index=0;
+ for (Index col=0;col < numVars_ ;++col)
+ {
+ for (Index row=0; row <= col; ++row)
+ {
+ finalHessian_[n*row+col]=values[index++];
+ }
+ }
+ }
+
+ return true;
+}
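+
+// What the block above computes (comment only): the Hessian of the Lagrangian
+//   H_L(x) = obj_factor * H_f(x) + sum_j lambda[j] * H_gj(x)
+// packed as its lower triangle in values[]. In the user-supplied branch the
+// Scilab routine "lHess1" is handed obj_factor and lambda and is expected to
+// return the already-assembled Lagrangian Hessian. finalHessian_ additionally
+// stores the full symmetric n x n matrix by mirroring the packed triangle.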
+
+//returning the results
+void minconNLP::finalize_solution(SolverReturn status,Index n, const Number* x, const Number* z_L, const Number* z_U,Index m, const Number* g, const Number* lambda, Number obj_value,const IpoptData* ip_data,IpoptCalculatedQuantities* ip_cq)
+{
+ finalX_ = (double*)malloc(sizeof(double) * numVars_ * 1);
+ for (Index i=0; i<numVars_; i++)
+ {
+ finalX_[i] = x[i];
+ }
+
+ finalZl_ = (double*)malloc(sizeof(double) * numVars_ * 1);
+ for (Index i=0; i<n; i++)
+ {
+ finalZl_[i] = z_L[i];
+ }
+
+ finalZu_ = (double*)malloc(sizeof(double) * numVars_ * 1);
+ for (Index i=0; i<n; i++)
+ {
+ finalZu_[i] = z_U[i];
+ }
+
+ finalLambda_ = (double*)malloc(sizeof(double) * numConstr_ * 1);
+ for (Index i=0; i<m; i++)
+ {
+ finalLambda_[i] = lambda[i];
+ }
+
+ finalObjVal_ = obj_value;
+ status_ = status;
+ iter_ = ip_data->iter_count();
+}
+
+
+const double * minconNLP::getX()
+{
+ return finalX_;
+}
+
+const double * minconNLP::getGrad()
+{
+ return finalGradient_;
+}
+
+const double * minconNLP::getHess()
+{
+ return finalHessian_;
+}
+
+const double * minconNLP::getZl()
+{
+ return finalZl_;
+}
+
+const double * minconNLP::getZu()
+{
+ return finalZu_;
+}
+
+const double * minconNLP::getLambda()
+{
+ return finalLambda_;
+}
+
+double minconNLP::getObjVal()
+{
+ return finalObjVal_;
+}
+
+double minconNLP::iterCount()
+{
+ return (double)iter_;
+}
+
+int minconNLP::returnStatus()
+{
+ return status_;
+}
+
+}
+
+
+
diff --git a/sci_gateway/cpp/sci_minuncNLP.cpp b/sci_gateway/cpp/sci_minuncNLP.cpp
new file mode 100644
index 0000000..874c093
--- /dev/null
+++ b/sci_gateway/cpp/sci_minuncNLP.cpp
@@ -0,0 +1,377 @@
+// Copyright (C) 2015 - IIT Bombay - FOSSEE
+//
+// Author: R.Vidyadhar & Vignesh Kannan
+// Organization: FOSSEE, IIT Bombay
+// Email: rvidhyadar@gmail.com & vignesh2496@gmail.com
+// This file must be used under the terms of the CeCILL.
+// This source file is licensed as described in the file COPYING, which
+// you should have received as part of this distribution. The terms
+// are also available at
+// http://www.cecill.info/licences/Licence_CeCILL_V2-en.txt
+
+
+#include "minuncNLP.hpp"
+#include "IpIpoptData.hpp"
+#include "sci_iofunc.hpp"
+
+extern "C"
+{
+
+#include <api_scilab.h>
+#include <Scierror.h>
+#include <BOOL.h>
+#include <localization.h>
+#include <sciprint.h>
+#include <string.h>
+#include <assert.h>
+#include <iostream>
+
+using namespace std;
+using namespace Ipopt;
+
+minuncNLP::~minuncNLP()
+{
+ free(finalX_);
+ free(finalGradient_);
+ free(finalHessian_);
+}
+
+//get NLP info such as the number of variables and constraints, and the number of non-zero elements in the Jacobian and Hessian, so that memory can be allocated
+bool minuncNLP::get_nlp_info(Index& n, Index& m, Index& nnz_jac_g, Index& nnz_h_lag, IndexStyleEnum& index_style)
+{
+ finalGradient_ = (double*)malloc(sizeof(double) * numVars_ * 1);
+ finalHessian_ = (double*)malloc(sizeof(double) * numVars_ * numVars_);
+ n=numVars_; // Number of variables
+ m=numConstr_; // Number of constraints
+ nnz_jac_g = 0; // No. of elements in Jacobian of constraints
+ nnz_h_lag = n*(n+1)/2; // No. of elements in lower triangle of Hessian of the Lagrangian.
+ index_style=C_STYLE; // Index style of matrices
+ return true;
+}
+
+//get variable and constraint bound info
+bool minuncNLP::get_bounds_info(Index n, Number* x_l, Number* x_u, Index m, Number* g_l, Number* g_u)
+{
+ unsigned int i;
+ for(i=0;i<n;i++)
+ {
+ x_l[i]=-1.0e19;
+ x_u[i]=1.0e19;
+ }
+
+ g_l=NULL;
+ g_u=NULL;
+ return true;
+}
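+
+// Note: +/-1.0e19 matches IPOPT's default nlp_lower_bound_inf/nlp_upper_bound_inf
+// thresholds, so every variable is treated as unbounded and the problem is
+// effectively unconstrained.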
+
+// return the value of the constraints: g(x)
+bool minuncNLP::eval_g(Index n, const Number* x, bool new_x, Index m, Number* g)
+{
+ // return the value of the constraints: g(x)
+ g=NULL;
+ return true;
+}
+
+// return the structure or values of the jacobian
+bool minuncNLP::eval_jac_g(Index n, const Number* x, bool new_x,Index m, Index nele_jac, Index* iRow, Index *jCol,Number* values)
+{
+ if (values == NULL)
+ {
+ // return the structure of the jacobian of the constraints
+ iRow=NULL;
+ jCol=NULL;
+ }
+ else
+ {
+ values=NULL;
+ }
+
+ return true;
+}
+
+//get value of objective function at vector x
+bool minuncNLP::eval_f(Index n, const Number* x, bool new_x, Number& obj_value)
+{
+ double check;
+ int* funptr=NULL;
+ if(getFunctionFromScilab(1,&funptr))
+ {
+ return 1;
+ }
+ char name[20]="f";
+ double obj=0;
+ double *xNew=const_cast<double*>(x); // Scilab API expects a non-const double*
+ createMatrixOfDouble(pvApiCtx, 7, 1, numVars_, xNew);
+ int positionFirstElementOnStackForScilabFunction = 7;
+ int numberOfRhsOnScilabFunction = 1;
+ int numberOfLhsOnScilabFunction = 2;
+ int pointerOnScilabFunction = *funptr;
+
+ C2F(scistring)(&positionFirstElementOnStackForScilabFunction,name,
+ &numberOfLhsOnScilabFunction,
+ &numberOfRhsOnScilabFunction,(unsigned long)strlen(name));
+
+ if(getDoubleFromScilab(8,&check))
+ {
+ return true;
+ }
+ if (check==1)
+ {
+ return true;
+ }
+ else
+ {
+ if(getDoubleFromScilab(7,&obj))
+ {
+ sciprint("No obj value");
+ return 1;
+ }
+ obj_value=obj;
+ }
+ return true;
+}
+
+//get value of gradient of objective function at vector x.
+bool minuncNLP::eval_grad_f(Index n, const Number* x, bool new_x, Number* grad_f)
+{
+ double check;
+ if (flag1_==0)
+ {
+ int* gradhessptr=NULL;
+ if(getFunctionFromScilab(2,&gradhessptr))
+ {
+ return 1;
+ }
+ double *xNew=const_cast<double*>(x);
+ double t=1;
+ createMatrixOfDouble(pvApiCtx, 7, 1, numVars_, xNew);
+ createScalarDouble(pvApiCtx, 8,t);
+ int positionFirstElementOnStackForScilabFunction = 7;
+ int numberOfRhsOnScilabFunction = 2;
+ int numberOfLhsOnScilabFunction = 2;
+ int pointerOnScilabFunction = *gradhessptr;
+ char name[20]="gradhess";
+
+ C2F(scistring)(&positionFirstElementOnStackForScilabFunction,name,
+ &numberOfLhsOnScilabFunction,
+ &numberOfRhsOnScilabFunction,(unsigned long)strlen(name));
+ }
+
+ else if (flag1_==1)
+ {
+ int* gradptr=NULL;
+ if(getFunctionFromScilab(4,&gradptr))
+ {
+ return 1;
+ }
+ double *xNew=const_cast<double*>(x);
+ createMatrixOfDouble(pvApiCtx, 7, 1, numVars_, xNew);
+ int positionFirstElementOnStackForScilabFunction = 7;
+ int numberOfRhsOnScilabFunction = 1;
+ int numberOfLhsOnScilabFunction = 2;
+ int pointerOnScilabFunction = *gradptr;
+ char name[20]="fGrad1";
+
+ C2F(scistring)(&positionFirstElementOnStackForScilabFunction,name,
+ &numberOfLhsOnScilabFunction,
+ &numberOfRhsOnScilabFunction,(unsigned long)strlen(name));
+ }
+
+ if(getDoubleFromScilab(8,&check))
+ {
+ return true;
+ }
+ if (check==1)
+ {
+ return true;
+ }
+ else
+ {
+ double* resg;
+ int x0_rows,x0_cols;
+ if(getDoubleMatrixFromScilab(7, &x0_rows, &x0_cols, &resg))
+ {
+ sciprint("No results");
+ return 1;
+
+ }
+
+ Index i;
+ for(i=0;i<numVars_;i++)
+ {
+ grad_f[i]=resg[i];
+ finalGradient_[i]=resg[i];
+ }
+ }
+ return true;
+}
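+
+// Note on flag1_: 0 presumably means no user gradient was supplied, so the
+// "gradhess" helper is called with mode t = 1 (gradient); 1 calls the
+// user-supplied gradient routine exposed to this gateway as "fGrad1". In both
+// cases the result is read from stack position 7 with the error flag at 8.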
+
+// This method sets the initial values for the required vectors; x is initialised from the user-supplied starting point varGuess_.
+bool minuncNLP::get_starting_point(Index n, bool init_x, Number* x,bool init_z, Number* z_L, Number* z_U,Index m, bool init_lambda,Number* lambda)
+{
+ assert(init_x == true);
+ assert(init_z == false);
+ assert(init_lambda == false);
+ if (init_x == true)
+ { //we need to set initial values for vector x
+ for (Index var=0;var<n;var++)
+ x[var]=varGuess_[var]; //initialise from the user-supplied starting point
+ }
+
+ return true;
+}
+
+/*
+ * Return either the sparsity structure of the Hessian of the Lagrangian,
+ * or the values of the Hessian of the Lagrangian for the given values for
+ * x,lambda,obj_factor.
+*/
+
+bool minuncNLP::eval_h(Index n, const Number* x, bool new_x,Number obj_factor, Index m, const Number* lambda,bool new_lambda, Index nele_hess, Index* iRow,Index* jCol, Number* values)
+{
+ double check;
+ if (values==NULL)
+ {
+ Index idx=0;
+ for (Index row = 0; row < numVars_; row++)
+ {
+ for (Index col = 0; col <= row; col++)
+ {
+ iRow[idx] = row;
+ jCol[idx] = col;
+ idx++;
+ }
+ }
+ }
+
+ else
+ {
+ if(flag2_==0)
+ {
+ int* gradhessptr=NULL;
+ if(getFunctionFromScilab(2,&gradhessptr))
+ {
+ return 1;
+ }
+ double *xNew=const_cast<double*>(x);
+ double t=2;
+ createMatrixOfDouble(pvApiCtx, 7, 1, numVars_, xNew);
+ createScalarDouble(pvApiCtx, 8,t);
+ int positionFirstElementOnStackForScilabFunction = 7;
+ int numberOfRhsOnScilabFunction = 2;
+ int numberOfLhsOnScilabFunction = 2;
+ int pointerOnScilabFunction = *gradhessptr;
+ char name[20]="gradhess";
+
+ C2F(scistring)(&positionFirstElementOnStackForScilabFunction,name,
+ &numberOfLhsOnScilabFunction,
+ &numberOfRhsOnScilabFunction,(unsigned long)strlen(name));
+ }
+
+ else if (flag2_==1)
+ {
+ int* hessptr=NULL;
+ if(getFunctionFromScilab(6,&hessptr))
+ {
+ return 1;
+ }
+ double *xNew=const_cast<double*>(x);
+ createMatrixOfDouble(pvApiCtx, 7, 1, numVars_, xNew);
+ int positionFirstElementOnStackForScilabFunction = 7;
+ int numberOfRhsOnScilabFunction = 1;
+ int numberOfLhsOnScilabFunction = 2;
+ int pointerOnScilabFunction = *hessptr;
+ char name[20]="fHess1";
+
+ C2F(scistring)(&positionFirstElementOnStackForScilabFunction,name,
+ &numberOfLhsOnScilabFunction,
+ &numberOfRhsOnScilabFunction,(unsigned long)strlen(name));
+ }
+
+ if(getDoubleFromScilab(8,&check))
+ {
+ return true;
+ }
+ if (check==1)
+ {
+ return true;
+ }
+ else
+ {
+ double* resh;
+ int x0_rows,x0_cols;
+ if(getDoubleMatrixFromScilab(7, &x0_rows, &x0_cols, &resh))
+ {
+ sciprint("No results");
+ return 1;
+ }
+
+ Index index=0;
+ for (Index row=0;row < numVars_ ;++row)
+ {
+ for (Index col=0; col <= row; ++col)
+ {
+ values[index++]=obj_factor*(resh[numVars_*row+col]);
+ }
+ }
+
+ Index i;
+ for(i=0;i<numVars_*numVars_;i++)
+ {
+ finalHessian_[i]=resh[i];
+ }
+ }
+ }
+
+ return true;
+}
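+
+// Note on flag2_: 0 presumably selects the "gradhess" helper with mode t = 2
+// (Hessian), while 1 calls the user-supplied Hessian routine "fHess1". values[]
+// receives obj_factor times the lower triangle, and finalHessian_ keeps a full
+// copy of the n x n Hessian as returned from Scilab.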
+
+
+void minuncNLP::finalize_solution(SolverReturn status,Index n, const Number* x, const Number* z_L, const Number* z_U,Index m, const Number* g, const Number* lambda, Number obj_value,const IpoptData* ip_data,IpoptCalculatedQuantities* ip_cq)
+{
+ finalX_ = (double*)malloc(sizeof(double) * numVars_ * 1);
+ for (Index i=0; i<numVars_; i++)
+ {
+ finalX_[i] = x[i];
+ }
+
+ finalObjVal_ = obj_value;
+ status_ = status;
+ iter_ = ip_data->iter_count();
+}
+
+
+const double * minuncNLP::getX()
+{
+ return finalX_;
+}
+
+const double * minuncNLP::getGrad()
+{
+ return finalGradient_;
+}
+
+const double * minuncNLP::getHess()
+{
+ return finalHessian_;
+}
+
+double minuncNLP::getObjVal()
+{
+ return finalObjVal_;
+}
+
+double minuncNLP::iterCount()
+{
+ return (double)iter_;
+}
+
+int minuncNLP::returnStatus()
+{
+ return status_;
+}
+
+}
+
+
+