tutorial2.cpp
#include <cstdio>
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include <string.h>
#include <string>
#include <iostream>
#include "conopt.hpp"

#include <adolc/adolc.h>

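/*
 * ADOL-C supplies the adouble type and the tape-based drivers (trace_on/trace_off,
 * function, gradient and hessian) that are used below to evaluate the nonlinear
 * rows and their first and second derivatives automatically.
 */
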
class Tut_ModelData : public ConoptModelData
{
public:
   Tut_ModelData()
   {
   }

   void evaluateNonlinearExpression(adouble* x, adouble* g, int rowno, int numvar)
   {
      /* */
      /* Declare local copies of the optimization variables. This is */
      /* just for convenience to make the expressions easier to read. */
      /* */
      adouble L, Inp, Out, P;
      /* */
      /* Declare parameters and their data values. */
      /* */
      double Al = 0.16;
      double Ak = 2.0;
      double Ainp = 0.16;
      double Rho = 1.0;
      double K = 4.0;

      /* helper variables */
      adouble hold1 = 0, hold2 = 0;

      /* */
      /* Move the optimization variables from the X vector to a set */
      /* of local variables with the same names as the variables in */
      /* the model description. This is not necessary, but it should make */
      /* the equations easier to recognize. */
      /* This time we work with the C numbering convention. */
      /* */
      L = x[0];
      Inp = x[1];
      Out = x[2];
      P = x[3];
      /* */
      /* Row 0: the objective function is nonlinear */
      /* */

      if ( rowno == 0 ) {
         *g = P * Out;
      }
      /* */
      /* Row 1: The production function is nonlinear */
      /* */
      else if ( rowno == 1 ) {
         /* */
         /* Compute some common terms */
         /* */
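         /* hold1 is the CES aggregate Al*L^(-Rho) + Ak*K^(-Rho) + Ainp*Inp^(-Rho), and
          * hold2 = hold1^(-1/Rho) is the output produced by the production function
          */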
         hold1 = (Al*pow(L,(-Rho)) + Ak*pow(K,(-Rho)) + Ainp*pow(Inp,(-Rho)));
         hold2 = pow(hold1,( -1./Rho ));
         *g = hold2;
      }
      /* */
      /* Row 2: The row is linear and will not be called. */
      /* */

   }

   void initialiseAutoDiff()
   {
      int numcons = numCons();
      int numvar = numVar();

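      /* each nonlinear row is recorded on its own ADOL-C tape, tagged with the row number.
       * FDEval and SDLagrVal later replay these tapes with the ADOL-C drivers function(),
       * gradient() and hessian()
       */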
      for (int c = 0; c < numcons; c++)
      {
         double res;
         /* starting the trace for constraint c */
         trace_on(c);

         /* these are variable types for ADOL-C. Both the dependent and independent variables
          * must be declared as adouble
          */
         adouble* ax;
         adouble ag = 0;

         /* setting the values of the independent variables */
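         /* the <<= operator marks each ax[i] as an independent variable on the active tape
          * and assigns it the variable's current value
          */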
         ax = new adouble[numvar];
         for (int i = 0; i < numvar; i++)
            ax[i] <<= getVariable(i).curr;

         evaluateNonlinearExpression(ax, &ag, c, numvar);

         /* setting the result as the value of the dependent variable. */
         ag >>= res;

         delete[] ax;

         /* stopping the trace for constraint c */
         trace_off();
      }
   }

   /** adds the variables and constraints for the problem */
   void buildModel()
   {
      /* */
      /* Information about Variables: */
      /* Default: Lower = -Inf, Curr = 0, and Upper = +inf. */
      /* Default: the status information in Vsta is not used. */
      /* */
      /* Lower bound on L = X[0] = 0.1 and initial value = 0.5: */
      /* */
      addVariable(0.1, CONOPT_INF, 0.5);
      /* */
      /* Lower bound on INP = X[1] = 0.1 and initial value = 0.5: */
      /* */
      addVariable(0.1, CONOPT_INF, 0.5);
      /* */
      /* Lower bounds on OUT = X[2] and P = X[3] are both 0 and the */
      /* default initial value of 0 is used: */
      /* */
      addVariable(0., CONOPT_INF);
      addVariable(0., CONOPT_INF);
      /* */
      /* Information about Constraints: */
      /* Default: Rhs = 0 */
      /* Default: the status information in Esta and the function */
      /* value in FV are not used. */
      /* Default: Type: There is no default. */
      /* 0 = Equality, */
      /* 1 = Greater than or equal, */
      /* 2 = Less than or equal, */
      /* 3 = Non binding. */
      /* */
      /* Constraint 0 (Objective) */
      /* Rhs = -0.1 and type Non binding */
      /* */
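      /* Besides the constraint type and right-hand side, each call below also passes the
       * row's variable indices, the corresponding linear coefficient values, and flags
       * marking which entries are nonlinear; the Jacobian values of the flagged entries
       * are supplied later by FDEval (reading of the three lists inferred from the values
       * used here)
       */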
      addConstraint(ConoptConstraintType::Free, -0.1, {0, 1, 2, 3}, {-1, -1, 0, 0}, {0, 0, 1, 1});
      /* */
      /* Constraint 1 (Production Function) */
      /* Rhs = 0 and type Equality */
      /* */
      addConstraint(ConoptConstraintType::Eq, 0.0, {0, 1, 2}, {0, 0, -1}, {1, 1, 0});
      /* */
      /* Constraint 2 (Price equation) */
      /* Rhs = 4.0 and type Equality */
      /* */
      addConstraint(ConoptConstraintType::Eq, 4.0, {2, 3}, {1, 2}, {0, 0});

      /* setting the objective constraint */

      /* setting the optimisation direction */

      /* setting the structure of the hessian */
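      /* the entries are (row, column) variable pairs: (L,L), (Inp,L) and (Inp,Inp) come from
       * the production function, and (P,Out) from the objective term P*Out
       */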
      setSDLagrangianStructure({0, 1, 1, 3}, {0, 0, 1, 2});

      /* initialising the automatic differentiation */
      initialiseAutoDiff();
   }

   /** defines the nonlinearities of the model by returning numerical values. */
   int FDEval(const double x[], double* g, double jac[], int rowno, const int jacnum[], int mode, int ignerr,
              int* errcnt, int numvar, int numjac, int thread)
   {
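      /* mode = 1 requests the function value only, mode = 2 the derivatives only, and mode = 3 both */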
      /* using the function() method from ADOL-C to compute the function value from the trace */
      if (mode == 1 || mode == 3)
         function(rowno, 1, numvar, const_cast<double*>(x), g);

      /* using the gradient() method from ADOL-C to compute the gradient from the trace */
      if (mode == 2 || mode == 3)
         gradient(rowno, numvar, x, jac);

      return 0;
   }

   /** Computes and returns the numerical values of the Hessian. */
   int SDLagrVal(const double x[], const double u[], const int hsrw[], const int hscl[], double hsvl[],
                 int* nodrv, int numvar, int numcon, int nhess)
   {
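      /* the Hessian of the Lagrangian is accumulated as the sum over all rows of u[c] times
       * the Hessian of row c; only the entries listed in hsrw/hscl (the structure registered
       * in buildModel) are returned in hsvl
       */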
      double** hessres;

      /* allocating memory for the hessian result */
      hessres = new double*[numvar];
      for(int i = 0; i < numvar; i++)
         hessres[i] = new double[numvar];

      /* clearing the output values before accumulating over the constraints */
      for(int i = 0; i < nhess; i++)
         hsvl[i] = 0.0;

      for(int c = 0; c < numcon; c++)
      {
         /* using the hessian() method from ADOL-C to compute the hessian value from the trace */
         hessian(c, numvar, const_cast<double*>(x), hessres);

         for(int i = 0; i < nhess; i++)
         {
            hsvl[i] += u[c]*hessres[hsrw[i]][hscl[i]];
         }
      }

      for (int i = numvar - 1; i >= 0; i--)
         delete[] hessres[i];
      delete[] hessres;

      return 0;
   }
};

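/* std.cpp provides the shared tutorial helpers used in main() below: getProgramName(),
   cpp_log() and the Tut_MessageHandler class */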
#include "std.cpp"

int main(int argc, char** argv)
{
   int COI_Error = 0;

   // getting the program name from the executable path
   std::string pname = getProgramName(argv[0]);

   // initialising the Conopt object
   ConoptCpp conopt(pname);
   Tut_ModelData modeldata;
   Tut_MessageHandler msghandler(pname);

   // adding the message handler to the conopt interface
   conopt.setMessageHandler(msghandler);

   // building the model
   modeldata.buildModel();

   // loading the model into the conopt object
   conopt.loadModel(modeldata);

#if defined(CONOPT_LICENSE_INT_1) && defined(CONOPT_LICENSE_INT_2) && defined(CONOPT_LICENSE_INT_3) && defined(CONOPT_LICENSE_TEXT)
   std::string license = CONOPT_LICENSE_TEXT;
   COI_Error += conopt.setLicense(CONOPT_LICENSE_INT_1, CONOPT_LICENSE_INT_2, CONOPT_LICENSE_INT_3, license);
#endif

   if ( COI_Error )
      cpp_log(conopt, "Skipping COI_Solve due to license error. COI_Error = " + std::to_string(COI_Error), COI_Error);

   COI_Error = conopt.solve(); /* Optimize */

   // checking the statuses and objective value
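   // (model status 2 = locally optimal, solution status 1 = normal completion)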
   if ( conopt.modelStatus() != 2 || conopt.solutionStatus() != 1 )
   {
      cpp_log(conopt, "Incorrect Model or Solver Status", -1);
   }
   else if ( fabs( conopt.objectiveValue() - 0.572943 ) > 0.000001 )
   {
      cpp_log(conopt, "Incorrect objective returned", -1);
   }

   // printing the final status of the optimisation
   conopt.printStatus();

   cpp_log(conopt, "Successful Solve", COI_Error);
}