Diffstat (limited to 'help/en_US/fmincon.xml')
-rw-r--r--  help/en_US/fmincon.xml  475
1 files changed, 303 insertions, 172 deletions
diff --git a/help/en_US/fmincon.xml b/help/en_US/fmincon.xml
index 91569a2..a017135 100644
--- a/help/en_US/fmincon.xml
+++ b/help/en_US/fmincon.xml
@@ -17,7 +17,7 @@
<refnamediv>
<refname>fmincon</refname>
- <refpurpose>Solves a multi-variable constrainted optimization problem</refpurpose>
+  <refpurpose>Solves a multi-variable constrained optimization problem.</refpurpose>
</refnamediv>
@@ -40,108 +40,122 @@
</refsynopsisdiv>
<refsection>
- <title>Parameters</title>
+ <title>Input Parameters</title>
<variablelist>
<varlistentry><term>f :</term>
- <listitem><para> a function, representing the objective function of the problem</para></listitem></varlistentry>
+ <listitem><para> A function, representing the objective function of the problem.</para></listitem></varlistentry>
<varlistentry><term>x0 :</term>
- <listitem><para> a vector of doubles, containing the starting values of variables of size (1 X n) or (n X 1) where 'n' is the number of Variables</para></listitem></varlistentry>
+ <listitem><para> A vector of doubles, containing the starting values of variables of size (1 X n) or (n X 1) where 'n' is the number of variables.</para></listitem></varlistentry>
<varlistentry><term>A :</term>
- <listitem><para> a matrix of doubles, containing the coefficients of linear inequality constraints of size (m X n) where 'm' is the number of linear inequality constraints</para></listitem></varlistentry>
+ <listitem><para> A matrix of doubles, containing the coefficients of linear inequality constraints of size (m X n) where 'm' is the number of linear inequality constraints.</para></listitem></varlistentry>
<varlistentry><term>b :</term>
- <listitem><para> a vector of doubles, related to 'A' and containing the the Right hand side equation of the linear inequality constraints of size (m X 1)</para></listitem></varlistentry>
+ <listitem><para> A vector of doubles, related to 'A', representing the right-hand side of the linear inequality constraints, of size (m X 1).</para></listitem></varlistentry>
<varlistentry><term>Aeq :</term>
- <listitem><para> a matrix of doubles, containing the coefficients of linear equality constraints of size (m1 X n) where 'm1' is the number of linear equality constraints</para></listitem></varlistentry>
+ <listitem><para> A matrix of doubles, containing the coefficients of linear equality constraints of size (m1 X n) where 'm1' is the number of linear equality constraints.</para></listitem></varlistentry>
<varlistentry><term>beq :</term>
- <listitem><para> a vector of doubles, related to 'Aeq' and containing the the Right hand side equation of the linear equality constraints of size (m1 X 1)</para></listitem></varlistentry>
+ <listitem><para> A vector of doubles, related to 'Aeq', representing the right-hand side of the linear equality constraints, of size (m1 X 1).</para></listitem></varlistentry>
<varlistentry><term>lb :</term>
- <listitem><para> a vector of doubles, containing the lower bounds of the variables of size (1 X n) or (n X 1) where 'n' is the number of Variables</para></listitem></varlistentry>
+ <listitem><para> A vector of doubles, containing the lower bounds of the variables of size (1 X n) or (n X 1) where 'n' is the number of variables.</para></listitem></varlistentry>
<varlistentry><term>ub :</term>
- <listitem><para> a vector of doubles, containing the upper bounds of the variables of size (1 X n) or (n X 1) where 'n' is the number of Variables</para></listitem></varlistentry>
+ <listitem><para> A vector of doubles, containing the upper bounds of the variables of size (1 X n) or (n X 1) where 'n' is the number of variables.</para></listitem></varlistentry>
<varlistentry><term>nlc :</term>
- <listitem><para> a function, representing the Non-linear Constraints functions(both Equality and Inequality) of the problem. It is declared in such a way that non-linear inequality constraints are defined first as a single row vector (c), followed by non-linear equality constraints as another single row vector (ceq). Refer Example for definition of Constraint function.</para></listitem></varlistentry>
+ <listitem><para> A function, representing the non-linear constraints (both equality and inequality) of the problem. It is declared in such a way that the non-linear inequality constraints (c) and the non-linear equality constraints (ceq) are defined as separate single row vectors. Refer to Example 4 below for the definition of such a constraint function.</para></listitem></varlistentry>
<varlistentry><term>options :</term>
- <listitem><para> a list, containing the option for user to specify. See below for details.</para></listitem></varlistentry>
+ <listitem><para> A list, containing the options to be specified by the user. See below for details.</para></listitem></varlistentry>
+ </variablelist>
+</refsection>
+<refsection>
+<title>Output Parameters</title>
+ <variablelist>
<varlistentry><term>xopt :</term>
- <listitem><para> a vector of doubles, cointating the computed solution of the optimization problem</para></listitem></varlistentry>
+ <listitem><para> A vector of doubles, containing the computed solution of the optimization problem.</para></listitem></varlistentry>
<varlistentry><term>fopt :</term>
- <listitem><para> a scalar of double, containing the the function value at x</para></listitem></varlistentry>
+ <listitem><para> A double, containing the value of the objective function at xopt.</para></listitem></varlistentry>
<varlistentry><term>exitflag :</term>
- <listitem><para> a scalar of integer, containing the flag which denotes the reason for termination of algorithm. See below for details.</para></listitem></varlistentry>
+ <listitem><para> An integer, containing the flag which denotes the reason for termination of the algorithm. See below for details.</para></listitem></varlistentry>
<varlistentry><term>output :</term>
- <listitem><para> a structure, containing the information about the optimization. See below for details.</para></listitem></varlistentry>
+ <listitem><para> A structure, containing the information about the optimization. See below for details.</para></listitem></varlistentry>
<varlistentry><term>lambda :</term>
- <listitem><para> a structure, containing the Lagrange multipliers of lower bound, upper bound and constraints at the optimized point. See below for details.</para></listitem></varlistentry>
+ <listitem><para> A structure, containing the Lagrange multipliers of the lower bounds, upper bounds and constraints at the optimized point. See below for details.</para></listitem></varlistentry>
<varlistentry><term>gradient :</term>
- <listitem><para> a vector of doubles, containing the Objective's gradient of the solution.</para></listitem></varlistentry>
+ <listitem><para> A vector of doubles, containing the gradient of the objective function at the solution.</para></listitem></varlistentry>
<varlistentry><term>hessian :</term>
- <listitem><para> a matrix of doubles, containing the Lagrangian's hessian of the solution.</para></listitem></varlistentry>
+ <listitem><para> A matrix of doubles, containing the Hessian of the Lagrangian at the solution.</para></listitem></varlistentry>
</variablelist>
</refsection>
<refsection>
<title>Description</title>
<para>
-Search the minimum of a constrained optimization problem specified by :
+Search the minimum of a constrained optimization problem specified by:
+ </para>
+ <para>
Find the minimum of f(x) such that
</para>
<para>
<latex>
\begin{eqnarray}
-&amp;\mbox{min}_{x}
-&amp; f(x) \\
-&amp; \text{subject to} &amp; A*x \leq b \\
-&amp; &amp; Aeq*x \ = beq\\
-&amp; &amp; c(x) \leq 0\\
-&amp; &amp; ceq(x) \ = 0\\
-&amp; &amp; lb \leq x \leq ub \\
+&amp;\mbox{min}_{x}\ f(x) \\
+&amp; \text{Subject to}\\
+&amp; &amp; A\boldsymbol{\cdot} x \leq b \\
+&amp; &amp;A_{eq}\boldsymbol{\cdot} {x} = b_{eq}\\
+&amp; &amp;c(x) \leq 0\\
+&amp; &amp;c_{eq}(x) = 0\\
+&amp; &amp;lb \leq x \leq ub \\
\end{eqnarray}
</latex>
</para>
<para>
-The routine calls Ipopt for solving the Constrained Optimization problem, Ipopt is a library written in C++.
+fmincon calls Ipopt, an optimization library written in C++, to solve the Constrained Optimization problem.
+ </para>
+ <para>
+<title>Options</title>
+The options allow the user to set various parameters of the Optimization problem. The syntax for the options is given by:
+ </para>
+ <para>
+options= list("MaxIter", [---], "CpuTime", [---], "GradObj", ---, "Hessian", ---, "GradCon", ---);
</para>
<para>
-The options allows the user to set various parameters of the Optimization problem.
-It should be defined as type "list" and contains the following fields.
+The options should be defined as type "list" and consist of the following fields:
<itemizedlist>
-<listitem>Syntax : options= list("MaxIter", [---], "CpuTime", [---], "GradObj", ---, "Hessian", ---, "GradCon", ---);</listitem>
-<listitem>MaxIter : a Scalar, containing the Maximum Number of Iteration that the solver should take.</listitem>
-<listitem>CpuTime : a Scalar, containing the Maximum amount of CPU Time that the solver should take.</listitem>
-<listitem>GradObj : a function, representing the gradient function of the Objective in Vector Form.</listitem>
-<listitem>Hessian : a function, representing the hessian function of the Lagrange in Symmetric Matrix Form with Input parameters x, Objective factor and Lambda. Refer Example for definition of Lagrangian Hessian function.</listitem>
-<listitem>GradCon : a function, representing the gradient of the Non-Linear Constraints (both Equality and Inequality) of the problem. It is declared in such a way that gradient of non-linear inequality constraints are defined first as a separate Matrix (cg of size m2 X n or as an empty), followed by gradient of non-linear equality constraints as a separate Matrix (ceqg of size m2 X n or as an empty) where m2 &amp; m3 are number of non-linear inequality and equality constraints respectively.</listitem>
-<listitem>Default Values : options = list("MaxIter", [3000], "CpuTime", [600]);</listitem>
+<listitem>MaxIter : A Scalar, specifying the maximum number of iterations that the solver should take.</listitem>
+<listitem>CpuTime : A Scalar, specifying the maximum amount of CPU time in seconds that the solver should take.</listitem>
+<listitem>GradObj : A function, representing the gradient function of the Objective in vector form.</listitem>
+<listitem>Hessian : A function, representing the Hessian of the Lagrangian in the form of a symmetric matrix, with input parameters x, the objective factor and lambda. Refer to Example 5 for the definition of the Lagrangian Hessian function.</listitem>
+<listitem>GradCon : A function, representing the gradient of the Non-Linear Constraints (both Equality and Inequality) of the problem. It is declared in such a way that the gradient of the non-linear inequality constraints is defined first as a separate matrix (cg of size m2 X n or as an empty), followed by the gradient of the non-linear equality constraints as a separate matrix (ceqg of size m3 X n or as an empty), where m2 &amp; m3 are the numbers of non-linear inequality and equality constraints respectively.</listitem>
</itemizedlist>
+The default values for the various items are given as:
+ </para>
+ <para>
+options = list("MaxIter", [3000], "CpuTime", [600]);
</para>
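+  <para>
+As a minimal illustrative sketch of this syntax (the objective f and its gradient fGrad below are hypothetical, written only to show how an options list is assembled), the objective gradient could be supplied to the solver as follows:
+  </para>
+  <programlisting role="example"><![CDATA[
+//A small sketch of building an options list (illustrative only).
+//Objective function
+function y=f(x)
+y=x(1)^2 + x(2)^2;
+endfunction
+//Gradient of the objective, returned as a row vector.
+function y=fGrad(x)
+y=[2*x(1), 2*x(2)];
+endfunction
+//Limit the iterations and CPU time, and pass the gradient to the solver.
+options=list("MaxIter", [1000], "CpuTime", [100], "GradObj", fGrad);
+ ]]></programlisting>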
<para>
-The exitflag allows to know the status of the optimization which is given back by Ipopt.
+The exitflag allows the user to know the status of the optimization which is returned by Ipopt. The values it can take and what they indicate are described below:
<itemizedlist>
-<listitem>exitflag=0 : Optimal Solution Found </listitem>
-<listitem>exitflag=1 : Maximum Number of Iterations Exceeded. Output may not be optimal.</listitem>
-<listitem>exitflag=2 : Maximum amount of CPU Time exceeded. Output may not be optimal.</listitem>
-<listitem>exitflag=3 : Stop at Tiny Step.</listitem>
-<listitem>exitflag=4 : Solved To Acceptable Level.</listitem>
-<listitem>exitflag=5 : Converged to a point of local infeasibility.</listitem>
+<listitem> 0 : Optimal Solution Found </listitem>
+<listitem> 1 : Maximum Number of Iterations Exceeded. Output may not be optimal.</listitem>
+<listitem> 2 : Maximum amount of CPU Time exceeded. Output may not be optimal.</listitem>
+<listitem> 3 : Stop at Tiny Step.</listitem>
+<listitem> 4 : Solved To Acceptable Level.</listitem>
+<listitem> 5 : Converged to a point of local infeasibility.</listitem>
</itemizedlist>
</para>
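+ <para>
+A small usage sketch of checking the returned exitflag (reusing the objective and linear inequality constraints of Example 1 below) could look like this:
+ </para>
+ <programlisting role="example"><![CDATA[
+//Sketch: inspecting the exitflag after a solve (same problem as Example 1).
+function y=f(x)
+y=x(1)^2 - x(1)*x(2)/3 + x(2)^2;
+endfunction
+x0=[0 , 0];
+A=[1,1 ; 1,1/4 ; 1,-1];
+b=[2;1;1];
+[x,fval,exitflag,output] =fmincon(f, x0,A,b);
+if exitflag == 0 then
+disp("Optimal solution found");
+else
+disp("Solver stopped before an optimum was reached; see the list above for the reason");
+end
+ ]]></programlisting>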
<para>
-For more details on exitflag see the ipopt documentation, go to http://www.coin-or.org/Ipopt/documentation/
+For more details on exitflag, see the Ipopt documentation, which can be found at http://www.coin-or.org/Ipopt/documentation/
</para>
<para>
-The output data structure contains detailed informations about the optimization process.
-It has type "struct" and contains the following fields.
+The output data structure contains detailed information about the optimization process.
+It is of type "struct" and contains the following fields.
<itemizedlist>
-<listitem>output.Iterations: The number of iterations performed during the search</listitem>
-<listitem>output.Cpu_Time: The total cpu-time spend during the search</listitem>
-<listitem>output.Objective_Evaluation: The number of Objective Evaluations performed during the search</listitem>
-<listitem>output.Dual_Infeasibility: The Dual Infeasiblity of the final soution</listitem>
-<listitem>output.Message: The output message for the problem</listitem>
+<listitem>output.Iterations: The number of iterations performed.</listitem>
+<listitem>output.Cpu_Time: The total CPU time taken.</listitem>
+<listitem>output.Objective_Evaluation: The number of Objective Evaluations performed.</listitem>
+<listitem>output.Dual_Infeasibility: The dual infeasibility of the final solution.</listitem>
+<listitem>output.Message: The output message for the problem.</listitem>
</itemizedlist>
</para>
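+ <para>
+As an illustrative sketch, the fields of the output structure can be read directly after a solve (again using the problem of Example 1 below):
+ </para>
+ <programlisting role="example"><![CDATA[
+//Sketch: reading the output structure (same problem as Example 1).
+function y=f(x)
+y=x(1)^2 - x(1)*x(2)/3 + x(2)^2;
+endfunction
+x0=[0 , 0];
+A=[1,1 ; 1,1/4 ; 1,-1];
+b=[2;1;1];
+[x,fval,exitflag,output] =fmincon(f, x0,A,b);
+disp(output.Iterations);
+disp(output.Cpu_Time);
+disp(output.Message);
+ ]]></programlisting>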
<para>
-The lambda data structure contains the Lagrange multipliers at the end
-of optimization. In the current version the values are returned only when the the solution is optimal.
+The lambda data structure contains the Lagrange multipliers at the end of optimization. In the current version, the values are returned only when the solution is optimal.
It has type "struct" and contains the following fields.
<itemizedlist>
<listitem>lambda.lower: The Lagrange multipliers for the lower bound constraints.</listitem>
@@ -153,60 +167,192 @@ It has type "struct" and contains the following fields.
</itemizedlist>
</para>
<para>
+A few examples displaying the various functionalities of fmincon have been provided below. You will find a series of problems and the appropriate code snippets to solve them.
+ </para>
+
+</refsection>
+
+<refsection>
+ <title>Example</title>
+<para>
+ Here we minimize a simple non-linear objective function, subject to three linear inequality constraints.
+ </para>
+ <para>
+Find x in R^2 such that it minimizes:
+ </para>
+ <para>
+<latex>
+\begin{eqnarray}
+\mbox{min}_{x}\ f(x) = x_{1}^{2} - x_{1} \boldsymbol{\cdot} x_{2}/3 + x_{2}^{2}
+\end{eqnarray}
+\\\text{Subject to:}\\
+\begin{eqnarray}
+\hspace{70pt} &amp;x_{1} + x_{2}&amp;\leq 2\\
+\hspace{70pt} &amp;x_{1} + x_{2}/4&amp;\leq 1\\
+\hspace{70pt} &amp;-x_{1} + x_{2}&amp;\geq -1\\
+\end{eqnarray}
+</latex>
+ </para>
+ <para>
+
+ </para>
+ <programlisting role="example"><![CDATA[
+//Example 1:
+//Objective function to be minimised
+function y=f(x)
+y=x(1)^2 - x(1)*x(2)/3 + x(2)^2;
+endfunction
+//Starting point and linear constraints. Since we have not added any equality constraints or variable bounds, we need not specify them.
+x0=[0 , 0];
+A=[1,1 ; 1,1/4 ; 1,-1];
+b=[2;1;1];
+[x,fval,exitflag,output,lambda,grad,hessian] =fmincon(f, x0,A,b)
+
+ ]]></programlisting>
+</refsection>
+
+<refsection>
+ <title>Example</title>
+<para>
+Here we build on the previous example by adding linear equality constraints.
+We add the following constraints to the problem specified above:
+ </para>
+ <para>
+<latex>
+\begin{eqnarray}
+&amp;x_{1} - x_{2}&amp;= 1
+\\&amp;2x_{1} + x_{2}&amp;= 2
+\\ \end{eqnarray}
+</latex>
+ </para>
+<para>
</para>
+ <programlisting role="example"><![CDATA[
+
+//Example 2:
+//Objective function to be minimised
+function y=f(x)
+y=x(1)^2 - x(1)*x(2)/3 + x(2)^2;
+endfunction
+//Starting point, and linear constraints.
+x0=[0 , 0];
+A=[1,1 ; 1,1/4 ; -1,1];
+b=[2;1;2];
+//We specify the linear equality constraints below.
+Aeq = [1,-1; 2, 1];
+beq = [1;2];
+[x,fval,exitflag,output,lambda,grad,hessian] =fmincon(f, x0,A,b,Aeq,beq);
+
+
+ ]]></programlisting>
</refsection>
<refsection>
- <title>Examples</title>
+ <title>Example</title>
+<para>
+In this example, we proceed to add upper and lower bounds on the variables.
+ </para>
+ <para>
+<latex>
+\begin{eqnarray}
+-1 &amp;\leq x_{1} &amp;\leq \infty\\
+-\infty &amp;\leq x_{2} &amp;\leq 1
+\end{eqnarray}
+</latex>
+ </para>
+<para>
+</para>
<programlisting role="example"><![CDATA[
-//Find x in R^2 such that it minimizes:
-//f(x)= -x1 -x2/3
-//x0=[0,0]
-//constraint-1 (c1): x1 + x2 <= 2
-//constraint-2 (c2): x1 + x2/4 <= 1
-//constraint-3 (c3): x1 - x2 <= 2
-//constraint-4 (c4): -x1/4 - x2 <= 1
-//constraint-5 (c5): -x1 - x2 <= -1
-//constraint-6 (c6): -x1 + x2 <= 2
-//constraint-7 (c7): x1 + x2 = 2
+//Example 3:
//Objective function to be minimised
function y=f(x)
-y=-x(1)-x(2)/3;
+y=x(1)^2 - x(1)*x(2)/3 + x(2)^2;
endfunction
-//Starting point, linear constraints and variable bounds
+//Starting point, and linear constraints.
x0=[0 , 0];
-A=[1,1 ; 1,1/4 ; 1,-1 ; -1/4,-1 ; -1,-1 ; -1,1];
-b=[2;1;2;1;-1;2];
-Aeq=[1,1];
-beq=[2];
-lb=[];
-ub=[];
-nlc=[];
-//Gradient of objective function
-function y= fGrad(x)
-y= [-1,-1/3];
+A=[1,1 ; 1,1/4 ; -1,1];
+b=[2;1;2];
+//We specify the linear equality constraints below.
+Aeq = [1,-1; 2, 1];
+beq = [1;2];
+//The upper and lower bounds for the variables are defined as simple vectors, as shown below.
+lb = [-1;-%inf];
+ub = [%inf;1];
+[x,fval,exitflag,output,lambda,grad,hessian] =fmincon(f, x0,A,b,Aeq,beq,lb,ub);
+//Press ENTER to continue
+
+ ]]></programlisting>
+</refsection>
+
+<refsection>
+ <title>Example</title>
+ <para>
+Finally, we add the non-linear constraints to the problem. Note that the way these are defined differs from the way the linear constraints are specified.
+ </para>
+ <para>
+<latex>
+\begin{eqnarray}
+x_{1}^2-1&amp;\leq 0\\
+x_{1}^2+x_{2}^2-1&amp;\leq 0\\
+\end{eqnarray}
+</latex>
+</para>
+<para>
+</para>
+ <programlisting role="example"><![CDATA[
+//Example 4:
+//Objective function to be minimised
+function y=f(x)
+y=x(1)^2 - x(1)*x(2)/3 + x(2)^2;
endfunction
-//Hessian of lagrangian
-function y= lHess(x,obj,lambda)
-y= obj*[0,0;0,0]
+//Starting point, and linear constraints.
+x0=[0 , 0];
+A=[1,1 ; 1,1/4 ; -1,1];
+b=[2;1;2];
+//We specify the linear equality constraints below.
+Aeq = [1,-1; 2, 1];
+beq = [1;2];
+//The upper and lower bounds for the variables are specified below.
+lb = [-1;-%inf];
+ub = [%inf;1];
+//Nonlinear constraints are required to be defined as a single function with the inequality and equality constraints in separate vectors.
+function [c,ceq]=nlc(x)
+c=[x(1)^2-1,x(1)^2+x(2)^2-1];
+ceq=[];
endfunction
-//Options
-options=list("GradObj", fGrad, "Hessian", lHess);
-//Calling Ipopt
-[x,fval,exitflag,output,lambda,grad,hessian] =fmincon(f, x0,A,b,Aeq,beq,lb,ub,nlc,options)
-// Press ENTER to continue
+[x,fval,exitflag,output,lambda,grad,hessian] =fmincon(f, x0,A,b,Aeq,beq,lb,ub,nlc);
+//Press ENTER to continue
]]></programlisting>
</refsection>
<refsection>
- <title>Examples</title>
+<title>Example</title>
+ <para>
+Additional Functionality:
+ </para>
+ <para>
+We can further enhance the functionality of fmincon by setting input options. We can pre-define the gradient of the objective function and/or the Hessian of the Lagrangian, and thereby improve the speed of computation. This is elaborated in the example below, where we take the following problem, add simple non-linear constraints, specify the gradient and the Hessian of the Lagrangian, and set solver parameters using the options.
+ </para>
+
+ <para>
+ <latex>
+
+ \begin{eqnarray}
+ \mbox{min}_{x}\ f(x)= x_{1} \boldsymbol{\cdot} x_{2} + x_{2} \boldsymbol{\cdot} x_{3}
+ \end{eqnarray}
+ \\ \text{Subject to:}\\
+ \begin{eqnarray}
+ \hspace{70pt} &amp;c_{1}: x_{1}^2 - x_{2}^2 + x_{3}^2&amp;\leq 2 \\
+ \hspace{70pt} &amp;c_{2}: x_{1}^2 + x_{2}^2 + x_{3}^2&amp;\leq 10
+ \end{eqnarray}
+
+ </latex>
+ </para>
+ <para>
+</para>
<programlisting role="example"><![CDATA[
-//Find x in R^3 such that it minimizes:
-//f(x)= x1*x2 + x2*x3
-//x0=[0.1 , 0.1 , 0.1]
-//constraint-1 (c1): x1^2 - x2^2 + x3^2 <= 2
-//constraint-2 (c2): x1^2 + x2^2 + x3^2 <= 10
+//Example 5:
//Objective function to be minimised
function y=f(x)
y=x(1)*x(2)+x(2)*x(3);
@@ -220,20 +366,20 @@ beq=[];
lb=[];
ub=[];
//Nonlinear constraints
-function [c,ceq]=nlc(x)
+function [c, ceq]=nlc(x)
c = [x(1)^2 - x(2)^2 + x(3)^2 - 2 , x(1)^2 + x(2)^2 + x(3)^2 - 10];
ceq = [];
endfunction
//Gradient of objective function
-function y= fGrad(x)
+function y=fGrad(x)
y= [x(2),x(1)+x(3),x(2)];
endfunction
-//Hessian of the Lagrange Function
-function y= lHess(x,obj,lambda)
+//Hessian of the Lagrangian, which has been pre-defined to improve solver speed.
+function y=lHess(x, obj, lambda)
y= obj*[0,1,0;1,0,1;0,1,0] + lambda(1)*[2,0,0;0,-2,0;0,0,2] + lambda(2)*[2,0,0;0,2,0;0,0,2]
endfunction
//Gradient of Non-Linear Constraints
-function [cg,ceqg] = cGrad(x)
+function [cg, ceqg]=cGrad(x)
cg=[2*x(1) , -2*x(2) , 2*x(3) ; 2*x(1) , 2*x(2) , 2*x(3)];
ceqg=[];
endfunction
@@ -241,97 +387,82 @@ endfunction
options=list("MaxIter", [1500], "CpuTime", [500], "GradObj", fGrad, "Hessian", lHess,"GradCon", cGrad);
//Calling Ipopt
[x,fval,exitflag,output] =fmincon(f, x0,A,b,Aeq,beq,lb,ub,nlc,options)
-// Press ENTER to continue
-
+//Press ENTER to continue
]]></programlisting>
</refsection>
<refsection>
- <title>Examples</title>
+<title>Example</title>
+
+ <para>
+Infeasible Problems: Find x in R^2 such that it minimizes:
+ </para>
+ <para>
+<latex>
+\begin{eqnarray}
+f(x) = x_{1}^{2} - x_{1} \boldsymbol{\cdot} x_{2}/3 + x_{2}^{2}
+\end{eqnarray}
+\\\text{Subject to:}\\
+\begin{eqnarray}
+\hspace{70pt} &amp;x_{1} + x_{2}&amp;\leq 2\\
+\hspace{70pt} &amp;x_{1} + x_{2}/4&amp;\leq 1\\
+\hspace{70pt} &amp;-x_{1} + x_{2}&amp;\geq -1\\
+\hspace{70pt} &amp;x_{1} + x_{2}&amp;= 3
+\end{eqnarray}
+</latex>
+ </para>
+ <para>
+ </para>
<programlisting role="example"><![CDATA[
-//The below problem is an unbounded problem:
-//Find x in R^3 such that it minimizes:
-//f(x)= -(x1^2 + x2^2 + x3^2)
-//x0=[0.1 , 0.1 , 0.1]
-// x1 <= 0
-// x2 <= 0
-// x3 <= 0
-//Objective function to be minimised
+//Example 6:
+//An infeasible problem: the equality constraint below conflicts with the linear inequality constraints.
function y=f(x)
-y=-(x(1)^2+x(2)^2+x(3)^2);
+y=x(1)^2 - x(1)*x(2)/3 + x(2)^2;
endfunction
-//Starting point, linear constraints and variable bounds
-x0=[0.1 , 0.1 , 0.1];
-A=[];
-b=[];
-Aeq=[];
-beq=[];
-lb=[];
-ub=[0,0,0];
-//Options
-options=list("MaxIter", [1500], "CpuTime", [500]);
-//Calling Ipopt
-[x,fval,exitflag,output,lambda,grad,hessian] =fmincon(f, x0,A,b,Aeq,beq,lb,ub,[],options)
-// Press ENTER to continue
+x0=[0 , 0];
+A=[1,1 ; 1,1/4 ; 1,-1];
+b=[2;1;1];
+Aeq = [1,1];
+beq = 3;
+[x,fval,exitflag,output,lambda,grad,hessian] =fmincon(f, x0,A,b,Aeq,beq)
]]></programlisting>
-</refsection>
+ </refsection>
<refsection>
- <title>Examples</title>
- <programlisting role="example"><![CDATA[
-//The below problem is an infeasible problem:
-//Find x in R^3 such that in minimizes:
-//f(x)=x1*x2 + x2*x3
-//x0=[1,1,1]
-//constraint-1 (c1): x1^2 <= 1
-//constraint-2 (c2): x1^2 + x2^2 <= 1
-//constraint-3 (c3): x3^2 <= 1
-//constraint-4 (c4): x1^3 = 0.5
-//constraint-5 (c5): x2^2 + x3^2 = 0.75
-// 0 <= x1 <=0.6
-// 0.2 <= x2 <= inf
-// -inf <= x3 <= 1
-//Objective function to be minimised
+<title>Example</title>
+<para>
+Unbounded Problems: Find x in R^2 such that it minimizes:
+</para>
+ <para>
+<latex>
+\begin{eqnarray}
+f(x) = x_{1} \boldsymbol{\cdot} x_{2}/3 - x_{1}^{2} - x_{2}^{2}
+\end{eqnarray}
+\\\text{Subject to:}\\
+\begin{eqnarray}
+\hspace{70pt} &amp;x_{1} + x_{2}&amp;\geq 2\\
+\hspace{70pt} &amp;x_{1} + x_{2}&amp;\geq -1
+\\\end{eqnarray}
+</latex>
+ </para>
+ <para>
+ </para>
+<programlisting role="example"><![CDATA[
+//Example 7: Unbounded objective function.
function y=f(x)
-y=x(1)*x(2)+x(2)*x(3);
+y=-(x(1)^2 - x(1)*x(2)/3 + x(2)^2);
endfunction
-//Starting point, linear constraints and variable bounds
-x0=[1,1,1];
-A=[];
-b=[];
-Aeq=[];
-beq=[];
-lb=[0 0.2,-%inf];
-ub=[0.6 %inf,1];
-//Nonlinear constraints
-function [c,ceq]=nlc(x)
-c=[x(1)^2-1,x(1)^2+x(2)^2-1,x(3)^2-1];
-ceq=[x(1)^3-0.5,x(2)^2+x(3)^2-0.75];
-endfunction
-//Gradient of objective function
-function y= fGrad(x)
-y= [x(2),x(1)+x(3),x(2)];
-endfunction
-//Hessian of the Lagrange Function
-function y= lHess(x,obj,lambda)
-y= obj*[0,1,0;1,0,1;0,1,0] + lambda(1)*[2,0,0;0,0,0;0,0,0] + ..
-lambda(2)*[2,0,0;0,2,0;0,0,0] +lambda(3)*[0,0,0;0,0,0;0,0,2] + ..
-lambda(4)*[6*x(1),0,0;0,0,0;0,0,0] + lambda(5)*[0,0,0;0,2,0;0,0,2];
-endfunction
-//Gradient of Non-Linear Constraints
-function [cg,ceqg] = cGrad(x)
-cg = [2*x(1),0,0;2*x(1),2*x(2),0;0,0,2*x(3)];
-ceqg = [3*x(1)^2,0,0;0,2*x(2),2*x(3)];
-endfunction
-//Options
-options=list("MaxIter", [1500], "CpuTime", [500], "GradObj", fGrad, "Hessian", lHess,"GradCon", cGrad);
-//Calling Ipopt
-[x,fval,exitflag,output,lambda,grad,hessian] =fmincon(f, x0,A,b,Aeq,beq,lb,ub,nlc,options)
-// Press ENTER to continue
- ]]></programlisting>
+x0=[0 , 0];
+//Note: the second row encodes x1 + x2 >= -1, matching the problem statement above.
+A=[-1,-1 ; -1,-1];
+b=[-2;1];
+[x,fval,exitflag,output,lambda,grad,hessian] =fmincon(f, x0,A,b);
+
+]]></programlisting>
+
</refsection>
+
<refsection>
<title>Authors</title>
<simplelist type="vert">