CONOPT
tutorial2str.cpp
#include <cstdio>
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include <string.h>
#include <iostream>
#include <set>
#include <utility>
#include <vector>
#include "conopt.hpp"

#include <adolc/adolc.h>

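/* comparator that orders (row, column) index pairs lexicographically so they
 * can be kept in a std::set, which also discards duplicate pairs */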
struct PairComp {
   bool operator() (const std::pair<int, int> a, const std::pair<int, int> b) const {
      return a.first < b.first || (a.first == b.first && a.second < b.second);
   }
};

class Tut_ModelData : public ConoptModelData
{
public:
   Tut_ModelData()
   {
   }

   void evaluateNonlinearExpression(adouble* x, adouble* g, int rowno, int numvar)
   {
      /* */
      /* Declare local copies of the optimization variables. This is */
      /* just for convenience to make the expressions easier to read. */
      /* */
      adouble L, Inp, Out, P;
      /* */
      /* Declare parameters and their data values. */
      /* */
      double Al = 0.16;
      double Ak = 2.0;
      double Ainp = 0.16;
      double Rho = 1.0;
      double K = 4.0;

      /* helper variables */
      adouble hold1 = 0, hold2 = 0;

      /* */
      /* Move the optimization variables from the X vector to a set */
      /* of local variables with the same names as the variables in */
      /* the model description. This is not necessary, but it should make */
      /* the equations easier to recognize. */
      /* This time we work with the C numbering convention. */
      /* */
      L = x[0];
      Inp = x[1];
      Out = x[2];
      P = x[3];
      /* */
      /* Row 0: the objective function is nonlinear */
      /* */

      if ( rowno == 0 ) {
         *g = P * Out;
      }
      /* */
      /* Row 1: The production function is nonlinear */
      /* */
      else if ( rowno == 1 ) {
         /* */
         /* Compute some common terms */
         /* */
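         /* The production function has a CES (constant elasticity of
          * substitution) form:
          *    Out = ( Al*L^(-Rho) + Ak*K^(-Rho) + Ainp*Inp^(-Rho) )^(-1/Rho)
          * hold1 holds the inner sum and hold2 the outer power. */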
         hold1 = (Al*pow(L,(-Rho)) + Ak*pow(K,(-Rho)) + Ainp*pow(Inp,(-Rho)));
         hold2 = pow(hold1,( -1./Rho ));
         *g = hold2;
      }
      /* */
      /* Row 2: The row is linear and will not be called. */
      /* */

   }

   void initialiseAutoDiff()
   {
      int numcons = numCons();
      int numvar = numVar();

      for (int c = 0; c < numcons; c++)
      {
         double res;
         /* starting the trace for constraint c */
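         /* Each constraint is recorded on its own ADOL-C tape, using the
          * constraint index c as the tape tag. FDEval() and SDLagrVal() below
          * replay these tapes through the function(), gradient() and hessian()
          * drivers, passing the row number as the tag. */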
         trace_on(c);

         /* these are variable types for ADOL-C. Both the dependent and independent variables must be declared as
          * adouble
          */
         adouble* ax;
         adouble ag = 0;

         /* setting the values of the independent variables */
         ax = new adouble[numvar];
         for (int i = 0; i < numvar; i++)
            ax[i] <<= getVariable(i).curr;

         evaluateNonlinearExpression(ax, &ag, c, numvar);

         /* setting the result as the value of the dependent variable */
         ag >>= res;

         delete[] ax;

         /* stopping the trace for constraint c */
         trace_off();
      }

   }

   void computeHessianStructure()
   {
      int numcons = numCons();
      int numvar = numVar();

      double* x;
      unsigned int** hesspat;
      std::set<std::pair<int, int>, PairComp> hesspairs;
      std::vector<int> rowidx;
      std::vector<int> colidx;

      x = new double[numvar];
      hesspat = new unsigned int*[numvar];
      for (int i = 0; i < numvar; i++)
         hesspat[i] = new unsigned int[numvar];

      /* assigning initial values to the x vector */
      for (int i = 0; i < numvar; i++)
         x[i] = getVariable(i).curr;

      /* computing the structure of the hessian */
      for (int c = 0; c < numcons; c++)
      {
         /* using the hess_pat() method from ADOL-C to compute the structure of the Hessian */
         hess_pat(c, numvar, x, hesspat, 0);
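         /* hess_pat() returns the sparsity pattern in compressed row form:
          * hesspat[i][0] is the number of non-zeros in row i and
          * hesspat[i][1 .. hesspat[i][0]] hold their column indices. */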

         for (unsigned int i = 0; i < numvar; i++)
         {
            for (unsigned int j = 0; j < hesspat[i][0]; j++)
            {
               /* we are only interested in the lower triangle */
               if (hesspat[i][j + 1] <= i)
                  hesspairs.insert(std::make_pair(i, hesspat[i][j + 1]));
            }
         }
      }

      for (const auto &p : hesspairs)
      {
         rowidx.push_back(p.first);
         colidx.push_back(p.second);
      }

      for (int i = numvar - 1; i >= 0; i--)
         delete[] hesspat[i];
      delete[] hesspat;
      delete[] x;

      /* setting the structure of the hessian */
      setSDLagrangianStructure(rowidx, colidx);
   }

   void buildModel()
   {
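      /* The rows assembled below form the small production model used in the
       * CONOPT tutorials (variables L, Inp, Out and P):
       *    row 0 (objective): P*Out - L - Inp       (nonlinear in Out and P)
       *    row 1: CES production function - Out = 0
       *    row 2: Out + 2*P = 4                     (price equation)
       */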
      /* */
      /* Information about Variables: */
      /* Default: Lower = -Inf, Curr = 0, and Upper = +inf. */
      /* Default: the status information in Vsta is not used. */
      /* */
      /* Lower bound on L = X[0] = 0.1 and initial value = 0.5: */
      /* */
      addVariable(0.1, CONOPT_INF, 0.5);
      /* */
      /* Lower bound on INP = X[1] = 0.1 and initial value = 0.5: */
      /* */
      addVariable(0.1, CONOPT_INF, 0.5);
      /* */
      /* Lower bound on OUT = X[2] and P = X[3] are both 0 and the */
      /* default initial value of 0 is used: */
      /* */
      addVariable(0., CONOPT_INF);
      addVariable(0., CONOPT_INF);
      /* */
      /* Information about Constraints: */
      /* Default: Rhs = 0 */
      /* Default: the status information in Esta and the function */
      /*          value in FV are not used. */
      /* Default: Type: There is no default. */
      /*          0 = Equality, */
      /*          1 = Greater than or equal, */
      /*          2 = Less than or equal, */
      /*          3 = Non binding. */
      /* */
      /* Constraint 0 (Objective) */
      /* Rhs = -0.1 and type Non binding */
      /* */
      addConstraint(ConoptConstraintType::Free, -0.1, {0, 1, 2, 3}, {-1, -1, 0, 0}, {0, 0, 1, 1});
      /* */
      /* Constraint 1 (Production Function) */
      /* Rhs = 0 and type Equality */
      /* */
      addConstraint(ConoptConstraintType::Eq, 0.0, {0, 1, 2}, {0, 0, -1}, {1, 1, 0});
      /* */
      /* Constraint 2 (Price equation) */
      /* Rhs = 4.0 and type Equality */
      /* */
      addConstraint(ConoptConstraintType::Eq, 4.0, {2, 3}, {1, 2}, {0, 0});

      /* setting the objective constraint: constraint 0 serves as the */
      /* objective row and is registered through setObjectiveElement() */

      /* setting the optimisation direction through setOptimizationSense() */

      /* initialising the automatic differentiation */
      initialiseAutoDiff();

      /* computes and sets the Hessian structure */
      computeHessianStructure();
   }

   int FDEval(const double x[], double* g, double jac[], int rowno, const int jacnum[], int mode, int ignerr,
              int* errcnt, int numvar, int numjac, int thread)
   {
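      /* mode tells us what CONOPT is asking for: 1 = function value only,
       * 2 = derivatives only, 3 = both. The values are read back from the
       * ADOL-C tape recorded for this row in initialiseAutoDiff(). */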
      /* using the function() method from ADOL-C to compute the function value from the trace */
      if (mode == 1 || mode == 3)
         function(rowno, 1, numvar, const_cast<double*>(x), g);

      /* using the gradient() method from ADOL-C to compute the gradient from the trace */
      if (mode == 2 || mode == 3)
         gradient(rowno, numvar, x, jac);

      return 0;
   }

   int SDLagrVal(const double x[], const double u[], const int hsrw[], const int hscl[], double hsvl[],
                 int* nodrv, int numvar, int numcon, int nhess)
   {
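      /* The Hessian of the Lagrangian is the weighted sum of the individual
       * row Hessians, sum over c of u[c] * d2g_c/dx2, returned only for the
       * lower-triangular positions (hsrw[i], hscl[i]) that were registered
       * earlier with setSDLagrangianStructure(). */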
      double** hessres;

      // allocating memory for the hessian result
      hessres = new double*[numvar];
      for(int i = 0; i < numvar; i++)
         hessres[i] = new double[numvar];

      for(int c = 0; c < numcon; c++)
      {
         /* using the hessian() method from ADOL-C to compute the hessian value from the trace */
         hessian(c, numvar, const_cast<double*>(x), hessres);

         for(int i = 0; i < nhess; i++)
         {
            hsvl[i] += u[c]*hessres[hsrw[i]][hscl[i]];
         }
      }

      for (int i = numvar - 1; i >= 0; i--)
         delete[] hessres[i];
      delete[] hessres;

      return 0;
   }
};

#include "std.cpp"

int main(int argc, char** argv)
{
   int COI_Error = 0;

   // getting the program name from the executable path
   std::string pname = getProgramName(argv[0]);

   // initialising the Conopt Object
   ConoptCpp conopt(pname);
   Tut_ModelData modeldata;
   Tut_MessageHandler msghandler(pname);

   // adding the message handler to the conopt interface
   conopt.setMessageHandler(msghandler);

   // building the model
   modeldata.buildModel();

   // loading the model into the conopt object
   conopt.loadModel(modeldata);

#if defined(CONOPT_LICENSE_INT_1) && defined(CONOPT_LICENSE_INT_2) && defined(CONOPT_LICENSE_INT_3) && defined(CONOPT_LICENSE_TEXT)
   std::string license = CONOPT_LICENSE_TEXT;
   COI_Error += conopt.setLicense(CONOPT_LICENSE_INT_1, CONOPT_LICENSE_INT_2, CONOPT_LICENSE_INT_3, license);
#endif

   if ( COI_Error )
      cpp_log(conopt, "Skipping COI_Solve due to license error. COI_Error = " + std::to_string(COI_Error), COI_Error);

   COI_Error = conopt.solve(); /* Optimize */

   // checking the statuses and objective value
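   // model status 2 corresponds to a locally optimal solution and solver
   // status 1 to normal completion in CONOPT's status codes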
   if ( conopt.modelStatus() != 2 || conopt.solutionStatus() != 1 )
   {
      cpp_log(conopt, "Incorrect Model or Solver Status", -1);
   }
   else if ( fabs( conopt.objectiveValue() - 0.572943 ) > 0.000001 )
   {
      cpp_log(conopt, "Incorrect objective returned", -1);
   }

   // printing the final status of the optimisation
   conopt.printStatus();

   cpp_log(conopt, "Successful Solve", COI_Error);
}