1 : //------------------------------------------------------------------------
2 : // Copyright (C) 1993:
3 : // J.C. Meza
4 : // Sandia National Laboratories
5 : // meza@california.sandia.gov
6 : //------------------------------------------------------------------------
7 :
8 : #ifdef HAVE_CONFIG_H
9 : #include "OPT++_config.h"
10 : #endif
11 :
12 : #ifdef HAVE_STD
13 : #include <cfloat>
14 : #include <cstring>
15 : #else
16 : #include <float.h>
17 : #include <string.h>
18 : #endif
19 :
20 : #ifdef WITH_MPI
21 : #include "mpi.h"
22 : #endif
23 :
24 : #include "NLP0.h"
25 : #include "TOLS.h"
26 : #include "cblas.h"
27 : #include "ioformat.h"
28 :
29 : using namespace std;
30 :
31 : using NEWMAT::Real;
32 : using NEWMAT::FloatingPointPrecision;
33 : using NEWMAT::ColumnVector;
34 : using NEWMAT::Matrix;
35 : using NEWMAT::SymmetricMatrix;
36 :
37 : //------------------------------------------------------------------------
38 : // external subroutines referenced by this module
39 : // Included to prevent compilation error when -ansi flag is used.
40 : //------------------------------------------------------------------------
41 :
42 : #if !(defined(__GNUC__) && __GNUC__ >= 3)
43 : extern "C" {
44 : double copysign(double, double);
45 : }
46 : #else
47 : #ifdef CYGWIN
48 : extern "C" {
49 : extern double copysign _PARAMS((double, double));
50 : }
51 : #endif
52 : #endif
53 :
54 : namespace OPTPP {
55 :
56 : //----------------------------------------------------------------------------
57 : // Evaluate the Hessian using finite differences
58 : // No analytical gradients available so use function values
59 : //----------------------------------------------------------------------------
60 :
SymmetricMatrix NLP0::FD2Hessian(ColumnVector & sx)
{
  // Approximate the Hessian by second-order finite differences using
  // function values only (no analytic gradients are available in NLP0).
  //
  //   sx - typical-scale vector; used with |x_i| to size the step per
  //        coordinate.
  //
  // Formulas implemented below:
  //   H(i,i) = (f(x+2h_i e_i) - 2 f(x+h_i e_i) + f(x)) / h_i^2
  //   H(i,j) = (f(x+h_i e_i+h_j e_j) - f(x+h_i e_i)
  //             - f(x+h_j e_j) + f(x)) / (h_i h_j)
  //
  // NOTE(review): mem_xc (the current iterate) is perturbed in place and
  // restored after each evaluation, so this routine is not reentrant.
  Real mcheps = FloatingPointPrecision::Epsilon();
  ColumnVector fcn_accrcy = getFcnAccrcy();
  double hieps, eta;
  int i;
  int nr = getDim();

  double xtmpi, xtmpj;
  double fii, fij, fx;

  ColumnVector fhi(nr), step(nr);
  SymmetricMatrix H(nr);

  // do we need this??? Dougm xc = getXc();
  // f at the current iterate; assumes getF() is up to date with mem_xc
  // -- TODO confirm callers guarantee this.
  fx = getF();

  // First pass: choose steps and record forward values f(x + h_i e_i).
  for (i=1; i<=nr; i++) {
    hieps = max(mcheps,fcn_accrcy(i));
    eta = pow(hieps,0.333333);                 // ~cube root of accuracy
    step(i) = eta*max(fabs(mem_xc(i)),sx(i));
    step(i) = copysign(step(i),mem_xc(i));     // step in the sign of x_i
    xtmpi = mem_xc(i);
    mem_xc(i) = xtmpi + step(i);
    fhi(i) = evalF(mem_xc);
    mem_xc(i) = xtmpi;                         // restore coordinate i
  }

  // Second pass: diagonal and strict upper triangle of H.
  for (i=1; i<=nr; i++) {
    xtmpi = mem_xc(i);
    mem_xc(i) = mem_xc(i) + step(i)*2.0;
    fii = evalF(mem_xc);                       // f(x + 2 h_i e_i)
    // Grouped as (fx - fhi) + (fii - fhi) to reduce cancellation.
    H(i,i) = ((fx - fhi(i)) + (fii - fhi(i))) / (step(i)*step(i));
    mem_xc(i) = xtmpi + step(i);               // x + h_i e_i for mixed terms
    for (int j=i+1; j<=nr; ++j) {
      xtmpj = mem_xc(j);
      mem_xc(j) = mem_xc(j) + step(j);
      fij = evalF(mem_xc);                     // f(x + h_i e_i + h_j e_j)
      H(i,j) = ((fx - fhi(i)) + (fij - fhi(j))) / (step(i)*step(j));
      mem_xc(j) = xtmpj;                       // restore coordinate j
    }
    mem_xc(i) = xtmpi;                         // restore coordinate i
  }
  return H;
}
107 :
// Compute gradient using backward finite differences

ColumnVector NLP0::BDGrad(const ColumnVector& sx, const ColumnVector& x,
                          double& fx, ColumnVector& grad)
{
  // Backward-difference gradient:
  //   grad(i) = (f(x) - f(x - h_i e_i)) / h_i,
  // with h_i = sqrt(max(mcheps, accuracy_i)) * max(|x_i|, sx(i)),
  // signed like x_i.
  //
  //   sx   - typical-scale vector used to size the steps
  //   x    - point at which the gradient is approximated
  //   fx   - f(x); (re)computed here by the last processor on the Spec1
  //          pass of speculative gradient evaluation
  //   grad - output gradient (also returned); each processor computes a
  //          strided subset of entries, exchanged via MPI_Bcast below
  int i, j, gradStart, gradEnd, nBcasts;
  double xtmp, fminus, hi, hieps;

  int me = 0;          // my MPI rank (0 in the serial build)
  int nprocs = 1;      // number of MPI processes (1 in the serial build)
  int ndim = getDim();
  const int tmpSize = (int) ceil((double) ndim/nprocs);
  double *tmpGradMinus = new double[tmpSize];  // per-processor bcast buffer
  ColumnVector xcurrent = x;
  ColumnVector fcn_accrcy = getFcnAccrcy();
  Real mcheps = FloatingPointPrecision::Epsilon();
  SpecOption SpecPass = getSpecOption();

#ifdef WITH_MPI

  int error, resultlen, flag;
  char buffer[MPI_MAX_ERROR_STRING];

  // Check to see if MPI has been initialized.

  error = MPI_Initialized(&flag);
  if (error != MPI_SUCCESS) {
    MPI_Error_string(error, buffer, &resultlen);
    cerr << "NLP0::BDGrad: MPI Error - " << buffer << endl;
  }

  // If it has, reset me and nprocs accordingly.

  if (flag == 1) {
    error = MPI_Comm_rank(MPI_COMM_WORLD, &me);
    if (error != MPI_SUCCESS) {
      MPI_Error_string(error, buffer, &resultlen);
      cerr << "NLP0::BDGrad: MPI Error - " << buffer << endl;
    }
    error = MPI_Comm_size(MPI_COMM_WORLD, &nprocs);
    if (error != MPI_SUCCESS) {
      MPI_Error_string(error, buffer, &resultlen);
      cerr << "NLP0::BDGrad: MPI Error - " << buffer << endl;
    }
  }

#endif

  // Set loop endpoints, f, and x according to which pass of
  // speculative gradient evaluation this is.

  if (SpecPass == Spec1) {
    // First speculative pass: the last processor evaluates f(x) while
    // the others start on the first nprocs-1 gradient entries.
    if (me == nprocs-1) {
      setSpecOption(NoSpec);   // suppress speculation inside this evalF
      fx = evalF(xcurrent);
      setSpecOption(SpecPass);
#ifdef WITH_MPI
      if (nprocs > 1)
        MPI_Bcast(&fx, 1, MPI_DOUBLE, me, MPI_COMM_WORLD);
#endif
    }
    gradStart = 1;
    gradEnd = min(ndim, nprocs-1);
    nBcasts = min(ndim, nprocs-1);
  }
  else if (SpecPass == Spec2) {
    // Second speculative pass: finish the remaining entries.
    gradStart = nprocs;
    gradEnd = ndim;
    nBcasts = min(gradEnd-gradStart+1, nprocs);
  }
  else {
    // NoSpec (or anything unrecognized): do the whole gradient.
    gradStart = 1;
    gradEnd = ndim;
    nBcasts = min(ndim, nprocs);
    if (SpecPass != NoSpec) {
      cerr << "NLP0::BDGrad: Invalid speculative gradient option - "
           << "SpecFlag = " << SpecPass << "\n"
           << "Assuming NoSpec..." << endl;
    }
  }

  // Compute my piece of the gradient.

  for (i=me+gradStart; i<=gradEnd; i+=nprocs) {

    // h_i = sqrt(accuracy) * max(|x_i|, typical scale), signed like x_i.
    hieps = sqrt(max(mcheps, fcn_accrcy(i)));
    hi = hieps * max(fabs(xcurrent(i)), sx(i));
    hi = copysign(hi, xcurrent(i));
    xtmp = xcurrent(i);
    xcurrent(i) = xtmp - hi;              // backward point x - h_i e_i

    setSpecOption(NoSpec);
    fminus = evalF(xcurrent);
    setSpecOption(SpecPass);
#ifdef WITH_MPI
    // On Spec1, pick up f(x) broadcast by the last processor.
    if (SpecPass == Spec1)
      MPI_Bcast(&fx, 1, MPI_DOUBLE, nprocs-1, MPI_COMM_WORLD);
#endif
    grad(i) = (fx - fminus) / hi;
    xcurrent(i) = xtmp;                   // restore coordinate i
  }

  // Share my piece of the gradient with everyone else, and
  // incorporate their pieces.

  if (nprocs > 1) {

    for (i=0; i<nBcasts; i++) {

      // Pack my strided entries into the contiguous buffer...
      for (j=me+gradStart; j<=gradEnd; j+=nprocs)
        tmpGradMinus[(j-me-gradStart)/nprocs] = grad(j);

#ifdef WITH_MPI
      MPI_Bcast(tmpGradMinus, tmpSize, MPI_DOUBLE, i, MPI_COMM_WORLD);
#endif

      // ...and unpack processor i's entries into grad.
      for (j=i+gradStart; j<=gradEnd; j+=nprocs)
        grad(j) = tmpGradMinus[(j-i-gradStart)/nprocs];
    }
  }

  if (tmpGradMinus != NULL)
    delete[] tmpGradMinus;

  return grad;
}
234 :
// Compute gradient using forward finite differences

ColumnVector NLP0::FDGrad(const ColumnVector& sx, const ColumnVector& x,
                          double& fx, ColumnVector& grad)
{
  // Forward-difference gradient:
  //   grad(i) = (f(x + h_i e_i) - f(x)) / h_i,
  // with h_i = sqrt(max(mcheps, accuracy_i)) * max(|x_i|, sx(i)),
  // signed like x_i.
  //
  //   sx   - typical-scale vector used to size the steps
  //   x    - point at which the gradient is approximated
  //   fx   - f(x); (re)computed here by the last processor on the Spec1
  //          pass of speculative gradient evaluation
  //   grad - output gradient (also returned); each processor computes a
  //          strided subset of entries, exchanged via MPI_Bcast below
  int i, j, gradStart, gradEnd, nBcasts;
  double xtmp, fplus, hi, hieps;

  int me = 0;          // my MPI rank (0 in the serial build)
  int nprocs = 1;      // number of MPI processes (1 in the serial build)
  int ndim = getDim();
  const int tmpSize = (int) ceil((double) ndim/nprocs);
  double *tmpGradPlus = new double[tmpSize];   // per-processor bcast buffer
  ColumnVector xcurrent = x;
  ColumnVector fcn_accrcy = getFcnAccrcy();
  Real mcheps = FloatingPointPrecision::Epsilon();
  SpecOption SpecPass = getSpecOption();

#ifdef WITH_MPI

  int error, resultlen, flag;
  char buffer[MPI_MAX_ERROR_STRING];

  // Check to see if MPI has been initialized.

  error = MPI_Initialized(&flag);
  if (error != MPI_SUCCESS) {
    MPI_Error_string(error, buffer, &resultlen);
    cerr << "NLP0::FDGrad: MPI Error - " << buffer << endl;
  }

  // If it has, reset me and nprocs accordingly.

  if (flag == 1) {
    error = MPI_Comm_rank(MPI_COMM_WORLD, &me);
    if (error != MPI_SUCCESS) {
      MPI_Error_string(error, buffer, &resultlen);
      cerr << "NLP0::FDGrad: MPI Error - " << buffer << endl;
    }
    error = MPI_Comm_size(MPI_COMM_WORLD, &nprocs);
    if (error != MPI_SUCCESS) {
      MPI_Error_string(error, buffer, &resultlen);
      cerr << "NLP0::FDGrad: MPI Error - " << buffer << endl;
    }
  }

#endif

  // Set loop endpoints, f, and x according to which pass of
  // speculative gradient evaluation this is.

  if (SpecPass == Spec1) {
    // First speculative pass: the last processor evaluates f(x) while
    // the others start on the first nprocs-1 gradient entries.
    if (me == nprocs-1) {
      setSpecOption(NoSpec);   // suppress speculation inside this evalF
      fx = evalF(xcurrent);
      setSpecOption(SpecPass);
#ifdef WITH_MPI
      if (nprocs > 1)
        MPI_Bcast(&fx, 1, MPI_DOUBLE, me, MPI_COMM_WORLD);
#endif
    }
    gradStart = 1;
    gradEnd = min(ndim, nprocs-1);
    nBcasts = min(ndim, nprocs-1);
  }
  else if (SpecPass == Spec2) {
    // Second speculative pass: finish the remaining entries.
    gradStart = nprocs;
    gradEnd = ndim;
    nBcasts = min(gradEnd-gradStart+1, nprocs);
  }
  else {
    // NoSpec (or anything unrecognized): do the whole gradient.
    gradStart = 1;
    gradEnd = ndim;
    nBcasts = min(ndim, nprocs);
    if (SpecPass != NoSpec) {
      cerr << "NLP0::FDGrad: Invalid speculative gradient option - "
           << "SpecFlag = " << SpecPass << "\n"
           << "Assuming NoSpec..." << endl;
    }
  }

  // Compute only my piece of the gradient.

  for (i=me+gradStart; i<=gradEnd; i+=nprocs) {

    // h_i = sqrt(accuracy) * max(|x_i|, typical scale), signed like x_i.
    hieps = sqrt(max(mcheps, fcn_accrcy(i)));
    hi = hieps * max(fabs(xcurrent(i)), sx(i));
    hi = copysign(hi, xcurrent(i));
    xtmp = xcurrent(i);
    xcurrent(i) = xtmp + hi;              // forward point x + h_i e_i
    setSpecOption(NoSpec);
    fplus = evalF(xcurrent);
    setSpecOption(SpecPass);
#ifdef WITH_MPI
    // On Spec1, pick up f(x) broadcast by the last processor.
    if (SpecPass == Spec1)
      MPI_Bcast(&fx, 1, MPI_DOUBLE, nprocs-1, MPI_COMM_WORLD);
#endif
    grad(i) = (fplus - fx) / hi;
    xcurrent(i) = xtmp;                   // restore coordinate i
  }

  // Share my piece of the gradient with everyone else, and
  // incorporate their pieces.

  if (nprocs > 1) {

    for (i=0; i<nBcasts; i++) {

      // Pack my strided entries into the contiguous buffer...
      for (j=me+gradStart; j<=gradEnd; j+=nprocs)
        tmpGradPlus[(j-me-gradStart)/nprocs] = grad(j);

#ifdef WITH_MPI
      MPI_Bcast(tmpGradPlus, tmpSize, MPI_DOUBLE, i, MPI_COMM_WORLD);
#endif

      // ...and unpack processor i's entries into grad.
      for (j=i+gradStart; j<=gradEnd; j+=nprocs)
        grad(j) = tmpGradPlus[(j-i-gradStart)/nprocs];

    }
  }

  if (tmpGradPlus != NULL)
    delete[] tmpGradPlus;

  return grad;
}
361 :
362 : // Compute gradient using central differences
363 :
364 : ColumnVector NLP0::CDGrad(const ColumnVector& sx, const ColumnVector& x,
365 0 : double& fx, ColumnVector& grad)
366 : {
367 : int i, gradStart, gradEnd, myStart, inc, nBcasts;
368 : double xtmp, fplus, fminus, hi, hieps;
369 : int j, tmpSize;
370 :
371 0 : int me = 0;
372 0 : int nprocs = 1;
373 0 : int ndim = getDim();
374 0 : ColumnVector xcurrent = x;
375 0 : ColumnVector fcn_accrcy = getFcnAccrcy();
376 0 : Real mcheps = FloatingPointPrecision::Epsilon();
377 0 : SpecOption SpecPass = getSpecOption();
378 :
379 : #ifdef WITH_MPI
380 :
381 : char buffer[MPI_MAX_ERROR_STRING];
382 : int error, resultlen, flag;
383 :
384 : // Check to see if MPI has been initialized.
385 :
386 : error = MPI_Initialized(&flag);
387 : if (error != MPI_SUCCESS) {
388 : MPI_Error_string(error, buffer, &resultlen);
389 : cerr << "NLP0::CDGrad: MPI Error - " << buffer << endl;
390 : }
391 :
392 : // If it has, reset me and nprocs accordingly.
393 :
394 : if (flag == 1) {
395 : error = MPI_Comm_rank(MPI_COMM_WORLD, &me);
396 : if (error != MPI_SUCCESS) {
397 : MPI_Error_string(error, buffer, &resultlen);
398 : cerr << "NLP0::CDGrad: MPI Error - " << buffer << endl;
399 : }
400 : error = MPI_Comm_size(MPI_COMM_WORLD, &nprocs);
401 : if (error != MPI_SUCCESS) {
402 : MPI_Error_string(error, buffer, &resultlen);
403 : cerr << "NLP0::CDGrad: MPI Error - " << buffer << endl;
404 : }
405 : }
406 :
407 : #endif
408 :
409 : // Set loop endpoints, f, and x according to which pass of
410 : // speculative gradient evaluation this is.
411 :
412 0 : if (SpecPass == Spec1) {
413 0 : if (me == nprocs-1) {
414 0 : setSpecOption(NoSpec);
415 0 : fx = evalF(xcurrent);
416 0 : setSpecOption(SpecPass);
417 : }
418 0 : gradStart = 1;
419 0 : gradEnd = min(ndim, (int) floor((double) (nprocs-1)/2));
420 0 : if (nprocs > 1)
421 0 : inc = (int) floor((double) (nprocs-1)/2);
422 : else
423 0 : inc = 1;
424 0 : nBcasts = min(ndim, (int) floor((double) (nprocs-1)/2));
425 : }
426 0 : else if (SpecPass == Spec2) {
427 0 : gradStart = (int) ceil((double) nprocs/2);
428 0 : gradEnd = ndim;
429 0 : if (nprocs > 1)
430 0 : inc = (int) floor((double) nprocs/2);
431 : else
432 0 : inc = 1;
433 0 : nBcasts = min(gradEnd-gradStart+1, (int) floor((double) nprocs/2));
434 : }
435 : else {
436 0 : gradStart = 1;
437 0 : gradEnd = ndim;
438 0 : if (nprocs > 1)
439 0 : inc = (int) floor((double) nprocs/2);
440 : else
441 0 : inc = 1;
442 0 : nBcasts = min(ndim, (int) floor((double) nprocs/2));
443 0 : if (SpecPass != NoSpec) {
444 : cerr << "NLP0::FDGrad: Invalid speculative gradient option - "
445 : << "SpecFlag = " << SpecPass << "\n"
446 0 : << "Assuming NoSpec..." << endl;
447 : }
448 : }
449 :
450 : // Compute only my piece of the gradient.
451 :
452 0 : myStart = (int) floor((double) me/2) + gradStart;
453 :
454 0 : for (i=myStart; i<=gradEnd; i+=inc) {
455 :
456 0 : hieps = max(mcheps, fcn_accrcy(i));
457 0 : hieps = pow(hieps, 0.333333);
458 0 : hi = hieps*max(fabs(xcurrent(i)), sx(i));
459 0 : hi = copysign(hi, xcurrent(i));
460 0 : xtmp = xcurrent(i);
461 :
462 : #ifdef WITH_MPI
463 : if (nprocs > 1) {
464 :
465 : // For multiple processors, even processors look forward, and
466 : // odd look backward.
467 :
468 : if (me%2 == 0)
469 : xcurrent(i) = xtmp + hi;
470 : else
471 : xcurrent(i) = xtmp - hi;
472 :
473 : setSpecOption(NoSpec);
474 : grad(i) = evalF(xcurrent)/(2*hi);
475 : setSpecOption(SpecPass);
476 : }
477 : else {
478 : // Otherwise, do the same as in the serial case.
479 : #endif
480 0 : xcurrent(i) = xtmp + hi;
481 0 : setSpecOption(NoSpec);
482 0 : fplus = evalF(xcurrent);
483 0 : setSpecOption(SpecPass);
484 :
485 0 : xcurrent(i) = xtmp - hi;
486 0 : setSpecOption(NoSpec);
487 0 : fminus = evalF(xcurrent);
488 0 : setSpecOption(SpecPass);
489 :
490 0 : grad(i)= (fplus - fminus) / (2*hi);
491 : #ifdef WITH_MPI
492 : }
493 : #endif
494 0 : xcurrent(i) = xtmp;
495 : }
496 :
497 0 : if (nprocs > 1) {
498 :
499 0 : if (nprocs%2 == 0)
500 0 : tmpSize = (int) ceil((double) (2*ndim)/nprocs);
501 : else
502 : // If there are an odd number of processors, the last one doesn't
503 : // count.
504 0 : tmpSize = (int) ceil((double) (2*ndim)/(nprocs-1));
505 :
506 0 : double *tmpGradPlus = new double[tmpSize];
507 0 : double *tmpGradMinus = new double[tmpSize];
508 :
509 0 : for (i=0; i<nBcasts; i++) {
510 :
511 0 : for (j=myStart; j<=gradEnd; j+=inc) {
512 0 : if (me%2 == 0)
513 0 : tmpGradPlus[(j-myStart)/inc] = grad(j);
514 : else
515 0 : tmpGradMinus[(j-myStart)/inc] = grad(j);
516 : }
517 :
518 : #ifdef WITH_MPI
519 : MPI_Bcast(tmpGradPlus, tmpSize, MPI_DOUBLE, 2*i, MPI_COMM_WORLD);
520 : MPI_Bcast(tmpGradMinus, tmpSize, MPI_DOUBLE, (2*i)+1, MPI_COMM_WORLD);
521 : #endif
522 :
523 0 : for (j=i+gradStart; j<=gradEnd; j+=inc)
524 : grad(j) = tmpGradPlus[(j-i-gradStart)/inc] -
525 0 : tmpGradMinus[(j-i-gradStart)/inc];
526 : }
527 0 : if (tmpGradPlus != NULL)
528 0 : delete[] tmpGradPlus;
529 0 : if (tmpGradMinus != NULL)
530 0 : delete[] tmpGradMinus;
531 : }
532 0 : return grad;
533 : }
534 :
// Compute gradient of nonlinear constraints using backward finite differences
Matrix NLP0::CONBDGrad(const ColumnVector& sx)
{
  // Backward-difference Jacobian of the nonlinear constraints:
  //   column i of gtmp = (c(x) - c(x - h_i e_i)) / h_i,
  // returned transposed as an n x ncnln matrix.
  //
  //   sx - typical-scale vector used to size the steps
  //
  // NOTE(review): unlike CONFDGrad, this routine perturbs the member
  // iterate mem_xc in place (restoring it after each evaluation), so it
  // is not reentrant.
  Real mcheps = FloatingPointPrecision::Epsilon();
  ColumnVector fcn_accrcy = getFcnAccrcy();
  int i, n;
  double xtmp, hi, hieps;
  ColumnVector fminus, fx;

  n = dim;
  Matrix grad(n,ncnln), gtmp(ncnln,n);
  fx = evalCF(mem_xc);    // constraint values at the current iterate
  //fx = getConstraintValue();

  for (i=1; i<=n; i++) {
    // h_i = sqrt(accuracy) * max(|x_i|, typical scale), signed like x_i.
    hieps = sqrt(max(mcheps,fcn_accrcy(i) ));
    hi = hieps*max(fabs(mem_xc(i)),sx(i));
    hi = copysign(hi,mem_xc(i));
    xtmp = mem_xc(i);
    mem_xc(i) = xtmp - hi;            // backward point x - h_i e_i
    fminus = evalCF(mem_xc);
    gtmp.Column(i) = (fx - fminus) / hi;
    mem_xc(i) = xtmp;                 // restore coordinate i
  }
  grad = gtmp.t();
  return grad;
}
562 :
563 : // Compute gradient of nonlinear constraints using forward finite differences
564 0 : Matrix NLP0::CONFDGrad(const ColumnVector& sx)
565 : {
566 0 : Real mcheps = FloatingPointPrecision::Epsilon();
567 0 : ColumnVector fcn_accrcy = getFcnAccrcy();
568 : int i, n;
569 : double xtmp, hi, hieps;
570 0 : ColumnVector fx, fplus;
571 :
572 0 : n = dim;
573 0 : ColumnVector xcurrent(n);
574 0 : Matrix grad(n,ncnln), gtmp(ncnln,n);
575 0 : xcurrent = getXc();
576 0 : fx = evalCF(xcurrent);
577 : //fx = getConstraintValue();
578 :
579 0 : for (i=1; i<=n; i++) {
580 0 : hieps = sqrt(max(mcheps,fcn_accrcy(i) ));
581 0 : hi = hieps*max(fabs(xcurrent(i)),sx(i));
582 0 : hi = copysign(hi,xcurrent(i));
583 0 : xtmp = xcurrent(i);
584 0 : xcurrent(i) = xtmp + hi;
585 0 : fplus = evalCF(xcurrent);
586 0 : gtmp.Column(i) = (fplus - fx) / hi;
587 0 : xcurrent(i) = xtmp;
588 : }
589 0 : grad = gtmp.t();
590 0 : return grad;
591 : }
592 :
// Compute gradient of nonlinear constraints using central differences
Matrix NLP0::CONCDGrad(const ColumnVector& sx)
{
  // Central-difference Jacobian of the nonlinear constraints:
  //   column i of gtmp = (c(x + h_i e_i) - c(x - h_i e_i)) / (2 h_i),
  // returned transposed as an n x ncnln matrix.
  //
  //   sx - typical-scale vector used to size the steps
  //
  // NOTE(review): perturbs the member iterate mem_xc in place (restoring
  // it after each pair of evaluations), so it is not reentrant.
  Real mcheps = FloatingPointPrecision::Epsilon();
  ColumnVector fcn_accrcy = getFcnAccrcy();
  int i, n;
  double xtmp, hi, hieps;
  ColumnVector fplus, fminus;

  n = dim;
  Matrix grad(n, ncnln), gtmp(ncnln,n);

  for (i=1; i<=n; i++) {

    // h_i = accuracy^(1/3) * max(|x_i|, typical scale), signed like x_i.
    hieps = max(mcheps,fcn_accrcy(i) );
    hieps = pow(hieps,0.333333);

    hi = hieps*max(fabs(mem_xc(i)),sx(i));
    hi = copysign(hi,mem_xc(i));

    xtmp = mem_xc(i);
    mem_xc(i) = xtmp + hi;            // forward point x + h_i e_i
    fplus = evalCF(mem_xc);

    mem_xc(i) = xtmp - hi;            // backward point x - h_i e_i
    fminus = evalCF(mem_xc);

    gtmp.Column(i)= (fplus - fminus) / (2*hi);
    mem_xc(i) = xtmp;                 // restore coordinate i
  }
  grad = gtmp.t();
  return grad;
}
626 :
627 : //-------------------------------------------------------------------------
628 : // Output Routines
629 : //-------------------------------------------------------------------------
630 :
631 0 : void NLP0::printState(char * s)
632 : { // Print out current state: x current, gradient and Function value
633 0 : cout << "\n\n========= " << s << " ===========\n\n";
634 0 : cout << "\n i\t x \t grad \t\t fcn_accrcy \n\n";
635 0 : for (int i=1; i<=dim; i++)
636 : cout << d(i,5) << "\t" << e(mem_xc(i),12,4)<< "\t\t"
637 0 : << e(mem_fcn_accrcy(i),12,4) << "\n";
638 0 : cout <<"Function Value = " << e(fvalue,12,4) << "\n";
639 : //cout <<"Function Accuracy = " << e(mem_fcn_accrcy,12,4) << "\n";
640 0 : cout <<"\n\n===================================================\n\n";
641 0 : }
642 :
643 0 : void NLP0::fPrintState(ostream *nlpout, char * s)
644 : { // Print out current state: x current, gradient and Function value
645 0 : (*nlpout) << "\n\n========= " << s << " ===========\n\n";
646 0 : (*nlpout) << "\n i\t x \t grad \t\t fcn_accrcy \n\n";
647 0 : for (int i=1; i<=dim; i++)
648 : (*nlpout) << d(i,5) << "\t" << e(mem_xc(i),12,4) << "\t\t"
649 0 : << e(mem_fcn_accrcy(i),12,4) << "\n";
650 0 : (*nlpout) <<"Function Value = " << e(fvalue,12,4) << "\n";
651 : // (*nlpout) <<"Function Accuracy = " << e(mem_fcn_accrcy,12,4) << "\n";
652 0 : (*nlpout) <<"\n\n===================================================\n\n";
653 0 : }
654 :
655 0 : void NLP0::saveState()
656 : { // Save current state: x current, gradient and Function value
657 0 : cout << dim << "\n";
658 0 : for (int i=1; i<=dim; i++)
659 0 : cout << e(mem_xc(i),24,16) << "\t" << e(mem_fcn_accrcy(i),24,16) << "\n";
660 : cout << e(fvalue,24,16) << "\n"
661 : << nlp_name << "\n"
662 : << nfevals << "\n"
663 : << is_expensive << "\n"
664 : << debug_ << "\n"
665 0 : << e(function_time,24,16) << "\n";
666 0 : }
667 :
668 0 : bool NLP0::hasConstraints()
669 : {
670 0 : bool nonempty = false;
671 0 : if( constraint_)
672 0 : if (constraint_->getNumOfSets())
673 0 : nonempty = true;
674 0 : return nonempty;
675 : }
676 :
677 0 : void NLP0::printConstraints()
678 : {
679 :
680 0 : constraint_->printConstraints();
681 0 : cout <<"\n\n===================================================\n\n";
682 :
683 0 : }
684 :
685 156 : } // namespace OPTPP
|