Class documentation of Concepts

Loading...
Searching...
No Matches
optimization.h
/*************************************************************************
ALGLIB 3.11.0 (source code generated 2017-05-11)
Copyright (c) Sergey Bochkanov (ALGLIB project).

>>> SOURCE LICENSE >>>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation (www.fsf.org); either version 2 of the
License, or (at your option) any later version.

This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.

A copy of the GNU General Public License is available at
http://www.fsf.org/licensing/licenses
>>> END OF LICENSE >>>
*************************************************************************/
20#ifndef _optimization_pkg_h
21#define _optimization_pkg_h
22#include "ap.h"
23#include "alglibinternal.h"
24#include "alglibmisc.h"
25#include "linalg.h"
26#include "solvers.h"
27
29//
30// THIS SECTION CONTAINS COMPUTATIONAL CORE DECLARATIONS (DATATYPES)
31//
33namespace alglib_impl
34{
35typedef struct
36{
37 ae_int_t n;
38 ae_int_t k;
39 double alpha;
40 double tau;
41 double theta;
42 ae_matrix a;
43 ae_matrix q;
44 ae_vector b;
45 ae_vector r;
46 ae_vector xc;
47 ae_vector d;
48 ae_vector activeset;
49 ae_matrix tq2dense;
50 ae_matrix tk2;
51 ae_vector tq2diag;
52 ae_vector tq1;
53 ae_vector tk1;
54 double tq0;
55 double tk0;
56 ae_vector txc;
57 ae_vector tb;
58 ae_int_t nfree;
59 ae_int_t ecakind;
60 ae_matrix ecadense;
61 ae_matrix eq;
62 ae_matrix eccm;
63 ae_vector ecadiag;
64 ae_vector eb;
65 double ec;
66 ae_vector tmp0;
67 ae_vector tmp1;
68 ae_vector tmpg;
69 ae_matrix tmp2;
70 ae_bool ismaintermchanged;
71 ae_bool issecondarytermchanged;
72 ae_bool islineartermchanged;
73 ae_bool isactivesetchanged;
75typedef struct
76{
77 ae_vector norms;
78 ae_vector alpha;
79 ae_vector rho;
80 ae_matrix yk;
81 ae_vector idx;
82 ae_vector bufa;
83 ae_vector bufb;
85typedef struct
86{
87 ae_int_t n;
88 ae_int_t k;
89 ae_vector d;
90 ae_matrix v;
91 ae_vector bufc;
92 ae_matrix bufz;
93 ae_matrix bufw;
94 ae_vector tmp;
96typedef struct
97{
98 ae_int_t ns;
99 ae_int_t nd;
100 ae_int_t nr;
101 ae_matrix densea;
102 ae_vector b;
103 ae_vector nnc;
104 double debugflops;
105 ae_int_t debugmaxinnerits;
106 ae_vector xn;
107 ae_vector xp;
108 ae_matrix tmpca;
109 ae_matrix tmplq;
110 ae_matrix trda;
111 ae_vector trdd;
112 ae_vector crb;
113 ae_vector g;
114 ae_vector d;
115 ae_vector dx;
116 ae_vector diagaa;
117 ae_vector cb;
118 ae_vector cx;
119 ae_vector cborg;
120 ae_vector tmpcholesky;
121 ae_vector r;
122 ae_vector regdiag;
123 ae_vector tmp0;
124 ae_vector tmp1;
125 ae_vector tmp2;
126 ae_vector rdtmprowmap;
128typedef struct
129{
130 ae_int_t n;
131 ae_int_t algostate;
132 ae_vector xc;
133 ae_bool hasxc;
134 ae_vector s;
135 ae_vector h;
136 ae_vector activeset;
137 ae_bool basisisready;
138 ae_matrix sbasis;
139 ae_matrix pbasis;
140 ae_matrix ibasis;
141 ae_int_t basissize;
142 ae_bool feasinitpt;
143 ae_bool constraintschanged;
144 ae_vector hasbndl;
145 ae_vector hasbndu;
146 ae_vector bndl;
147 ae_vector bndu;
148 ae_matrix cleic;
149 ae_int_t nec;
150 ae_int_t nic;
151 ae_vector mtx;
152 ae_vector mtas;
153 ae_vector cdtmp;
154 ae_vector corrtmp;
155 ae_vector unitdiagonal;
156 snnlssolver solver;
157 ae_vector scntmp;
158 ae_vector tmp0;
159 ae_vector tmpfeas;
160 ae_matrix tmpm0;
161 ae_vector rctmps;
162 ae_vector rctmpg;
163 ae_vector rctmprightpart;
164 ae_matrix rctmpdense0;
165 ae_matrix rctmpdense1;
166 ae_vector rctmpisequality;
167 ae_vector rctmpconstraintidx;
168 ae_vector rctmplambdas;
169 ae_matrix tmpbasis;
170} sactiveset;
171typedef struct
172{
173 double epsg;
174 double epsf;
175 double epsx;
176 ae_int_t maxouterits;
177 ae_bool cgphase;
178 ae_bool cnphase;
179 ae_int_t cgminits;
180 ae_int_t cgmaxits;
181 ae_int_t cnmaxupdates;
182 ae_int_t sparsesolver;
184typedef struct
185{
186 ae_int_t n;
187 ae_int_t nmain;
188 ae_int_t nslack;
189 ae_int_t nec;
190 ae_int_t nic;
191 ae_int_t akind;
192 ae_matrix densea;
193 sparsematrix sparsea;
194 ae_bool sparseupper;
195 double absamax;
196 double absasum;
197 double absasum2;
198 ae_vector b;
199 ae_vector bndl;
200 ae_vector bndu;
201 ae_vector havebndl;
202 ae_vector havebndu;
203 ae_matrix cleic;
204 ae_vector xs;
205 ae_vector xf;
206 ae_vector gc;
207 ae_vector xp;
208 ae_vector dc;
209 ae_vector dp;
210 ae_vector cgc;
211 ae_vector cgp;
212 sactiveset sas;
213 ae_vector activated;
214 ae_int_t nfree;
215 ae_int_t cnmodelage;
216 ae_matrix densez;
217 sparsematrix sparsecca;
218 ae_vector yidx;
219 ae_vector regdiag;
220 ae_vector regx0;
221 ae_vector tmpcn;
222 ae_vector tmpcni;
223 ae_vector tmpcnb;
224 ae_vector tmp0;
225 ae_vector stpbuf;
226 sparsebuffers sbuf;
227 ae_int_t repinneriterationscount;
228 ae_int_t repouteriterationscount;
229 ae_int_t repncholesky;
230 ae_int_t repncupdates;
231} qqpbuffers;
232typedef struct
233{
234 ae_int_t n;
235 ae_int_t m;
236 double epsg;
237 double epsf;
238 double epsx;
239 ae_int_t maxits;
240 ae_bool xrep;
241 double stpmax;
242 ae_vector s;
243 double diffstep;
244 ae_int_t nfev;
245 ae_int_t mcstage;
246 ae_int_t k;
247 ae_int_t q;
248 ae_int_t p;
249 ae_vector rho;
250 ae_matrix yk;
251 ae_matrix sk;
252 ae_vector xp;
253 ae_vector theta;
254 ae_vector d;
255 double stp;
256 ae_vector work;
257 double fold;
258 double trimthreshold;
259 ae_int_t prectype;
260 double gammak;
261 ae_matrix denseh;
262 ae_vector diagh;
263 ae_vector precc;
264 ae_vector precd;
265 ae_matrix precw;
266 ae_int_t preck;
267 precbuflbfgs precbuf;
268 precbuflowrank lowrankbuf;
269 double fbase;
270 double fm2;
271 double fm1;
272 double fp1;
273 double fp2;
274 ae_vector autobuf;
275 ae_vector x;
276 double f;
277 ae_vector g;
278 ae_bool needf;
279 ae_bool needfg;
280 ae_bool xupdated;
281 ae_bool userterminationneeded;
282 double teststep;
283 rcommstate rstate;
284 ae_int_t repiterationscount;
285 ae_int_t repnfev;
286 ae_int_t repvaridx;
287 ae_int_t repterminationtype;
288 linminstate lstate;
290typedef struct
291{
292 ae_int_t iterationscount;
293 ae_int_t nfev;
294 ae_int_t varidx;
295 ae_int_t terminationtype;
297typedef struct
298{
299 double epsx;
300 ae_int_t outerits;
301 double rho;
303typedef struct
304{
305 ae_vector nulc;
306 ae_matrix sclsfta;
307 ae_vector sclsftb;
308 ae_vector sclsfthasbndl;
309 ae_vector sclsfthasbndu;
310 ae_vector sclsftbndl;
311 ae_vector sclsftbndu;
312 ae_vector sclsftxc;
313 ae_matrix sclsftcleic;
314 ae_matrix exa;
315 ae_vector exb;
316 ae_vector exxc;
317 ae_vector exxn;
318 ae_vector exbndl;
319 ae_vector exbndu;
320 ae_vector exscale;
321 ae_vector exxorigin;
322 qqpsettings qqpsettingsuser;
323 qqpbuffers qqpbuf;
324 ae_vector nulcest;
325 ae_vector tmp0;
326 ae_matrix tmp2;
327 ae_vector modelg;
328 ae_vector d;
329 ae_vector deltax;
330 convexquadraticmodel dummycqm;
331 sparsematrix dummysparse;
332 ae_matrix qrkkt;
333 ae_vector qrrightpart;
334 ae_vector qrtau;
335 ae_vector qrsv0;
336 ae_vector qrsvx1;
337 ae_int_t repinneriterationscount;
338 ae_int_t repouteriterationscount;
339 ae_int_t repncholesky;
340 ae_int_t repnmv;
342typedef struct
343{
344 double epsg;
345 double epsf;
346 double epsx;
347 ae_int_t maxits;
349typedef struct
350{
351 sactiveset sas;
352 ae_vector pg;
353 ae_vector gc;
354 ae_vector xs;
355 ae_vector xn;
356 ae_vector workbndl;
357 ae_vector workbndu;
358 ae_vector havebndl;
359 ae_vector havebndu;
360 ae_matrix workcleic;
361 ae_vector rctmpg;
362 ae_vector tmp0;
363 ae_vector tmp1;
364 ae_vector tmpb;
365 ae_int_t repinneriterationscount;
366 ae_int_t repouteriterationscount;
367 ae_int_t repncholesky;
369typedef struct
370{
371 ae_int_t n;
372 double epsg;
373 double epsf;
374 double epsx;
375 ae_int_t maxits;
376 double stpmax;
377 double suggestedstep;
378 ae_bool xrep;
379 ae_bool drep;
380 ae_int_t cgtype;
381 ae_int_t prectype;
382 ae_vector diagh;
383 ae_vector diaghl2;
384 ae_matrix vcorr;
385 ae_int_t vcnt;
386 ae_vector s;
387 double diffstep;
388 ae_int_t nfev;
389 ae_int_t mcstage;
390 ae_int_t k;
391 ae_vector xk;
392 ae_vector dk;
393 ae_vector xn;
394 ae_vector dn;
395 ae_vector d;
396 double fold;
397 double stp;
398 double curstpmax;
399 ae_vector yk;
400 double lastgoodstep;
401 double lastscaledstep;
402 ae_int_t mcinfo;
403 ae_bool innerresetneeded;
404 ae_bool terminationneeded;
405 double trimthreshold;
406 ae_int_t rstimer;
407 ae_vector x;
408 double f;
409 ae_vector g;
410 ae_bool needf;
411 ae_bool needfg;
412 ae_bool xupdated;
413 ae_bool algpowerup;
414 ae_bool lsstart;
415 ae_bool lsend;
416 ae_bool userterminationneeded;
417 double teststep;
418 rcommstate rstate;
419 ae_int_t repiterationscount;
420 ae_int_t repnfev;
421 ae_int_t repvaridx;
422 ae_int_t repterminationtype;
423 ae_int_t debugrestartscount;
424 linminstate lstate;
425 double fbase;
426 double fm2;
427 double fm1;
428 double fp1;
429 double fp2;
430 double betahs;
431 double betady;
432 ae_vector work0;
433 ae_vector work1;
434} mincgstate;
435typedef struct
436{
437 ae_int_t iterationscount;
438 ae_int_t nfev;
439 ae_int_t varidx;
440 ae_int_t terminationtype;
442typedef struct
443{
444 ae_int_t nmain;
445 ae_int_t nslack;
446 double epsg;
447 double epsf;
448 double epsx;
449 ae_int_t maxits;
450 ae_bool xrep;
451 ae_bool drep;
452 double stpmax;
453 double diffstep;
454 sactiveset sas;
455 ae_vector s;
456 ae_int_t prectype;
457 ae_vector diagh;
458 ae_vector x;
459 double f;
460 ae_vector g;
461 ae_bool needf;
462 ae_bool needfg;
463 ae_bool xupdated;
464 ae_bool lsstart;
465 ae_bool steepestdescentstep;
466 ae_bool boundedstep;
467 ae_bool userterminationneeded;
468 double teststep;
469 rcommstate rstate;
470 ae_vector ugc;
471 ae_vector cgc;
472 ae_vector xn;
473 ae_vector ugn;
474 ae_vector cgn;
475 ae_vector xp;
476 double fc;
477 double fn;
478 double fp;
479 ae_vector d;
480 ae_matrix cleic;
481 ae_int_t nec;
482 ae_int_t nic;
483 double lastgoodstep;
484 double lastscaledgoodstep;
485 double maxscaledgrad;
486 ae_vector hasbndl;
487 ae_vector hasbndu;
488 ae_vector bndl;
489 ae_vector bndu;
490 ae_int_t repinneriterationscount;
491 ae_int_t repouteriterationscount;
492 ae_int_t repnfev;
493 ae_int_t repvaridx;
494 ae_int_t repterminationtype;
495 double repdebugeqerr;
496 double repdebugfs;
497 double repdebugff;
498 double repdebugdx;
499 ae_int_t repdebugfeasqpits;
500 ae_int_t repdebugfeasgpaits;
501 ae_vector xstart;
502 snnlssolver solver;
503 double fbase;
504 double fm2;
505 double fm1;
506 double fp1;
507 double fp2;
508 double xm1;
509 double xp1;
510 double gm1;
511 double gp1;
512 ae_int_t cidx;
513 double cval;
514 ae_vector tmpprec;
515 ae_vector tmp0;
516 ae_int_t nfev;
517 ae_int_t mcstage;
518 double stp;
519 double curstpmax;
520 double activationstep;
521 ae_vector work;
522 linminstate lstate;
523 double trimthreshold;
524 ae_int_t nonmonotoniccnt;
525 ae_matrix bufyk;
526 ae_matrix bufsk;
527 ae_vector bufrho;
528 ae_vector buftheta;
529 ae_int_t bufsize;
531typedef struct
532{
533 ae_int_t iterationscount;
534 ae_int_t nfev;
535 ae_int_t varidx;
536 ae_int_t terminationtype;
537 double debugeqerr;
538 double debugfs;
539 double debugff;
540 double debugdx;
541 ae_int_t debugfeasqpits;
542 ae_int_t debugfeasgpaits;
543 ae_int_t inneriterationscount;
544 ae_int_t outeriterationscount;
546typedef struct
547{
548 double epsg;
549 double epsf;
550 double epsx;
551 ae_int_t maxits;
553typedef struct
554{
555 minbleicstate solver;
556 minbleicreport solverrep;
557 ae_vector tmp0;
558 ae_vector tmp1;
559 ae_vector tmpi;
560 ae_int_t repinneriterationscount;
561 ae_int_t repouteriterationscount;
563typedef struct
564{
565 ae_int_t n;
566 qqpsettings qqpsettingsuser;
567 qpbleicsettings qpbleicsettingsuser;
568 qpdenseaulsettings qpdenseaulsettingsuser;
569 ae_bool dbgskipconstraintnormalization;
570 ae_int_t algokind;
571 ae_int_t akind;
573 sparsematrix sparsea;
574 ae_bool sparseaupper;
575 double absamax;
576 double absasum;
577 double absasum2;
578 ae_vector b;
579 ae_vector bndl;
580 ae_vector bndu;
581 ae_vector s;
582 ae_vector havebndl;
583 ae_vector havebndu;
584 ae_vector xorigin;
585 ae_vector startx;
586 ae_bool havex;
587 ae_matrix cleic;
588 ae_int_t nec;
589 ae_int_t nic;
590 sparsematrix scleic;
591 ae_int_t snec;
592 ae_int_t snic;
593 ae_vector xs;
594 ae_int_t repinneriterationscount;
595 ae_int_t repouteriterationscount;
596 ae_int_t repncholesky;
597 ae_int_t repnmv;
598 ae_int_t repterminationtype;
599 ae_vector tmp0;
600 ae_matrix ecleic;
601 ae_matrix dummyr2;
602 ae_bool qpbleicfirstcall;
603 qpbleicbuffers qpbleicbuf;
604 qqpbuffers qqpbuf;
605 qpdenseaulbuffers qpdenseaulbuf;
606 qpcholeskybuffers qpcholeskybuf;
607} minqpstate;
608typedef struct
609{
610 ae_int_t inneriterationscount;
611 ae_int_t outeriterationscount;
612 ae_int_t nmv;
613 ae_int_t ncholesky;
614 ae_int_t terminationtype;
616typedef struct
617{
618 double stabilizingpoint;
619 double initialinequalitymultiplier;
620 ae_int_t solvertype;
621 ae_int_t prectype;
622 ae_int_t updatefreq;
623 double rho;
624 ae_int_t n;
625 double epsg;
626 double epsf;
627 double epsx;
628 ae_int_t maxits;
629 ae_int_t aulitscnt;
630 ae_bool xrep;
631 double stpmax;
632 double diffstep;
633 double teststep;
634 ae_vector s;
635 ae_vector bndl;
636 ae_vector bndu;
637 ae_vector hasbndl;
638 ae_vector hasbndu;
639 ae_int_t nec;
640 ae_int_t nic;
641 ae_matrix cleic;
642 ae_int_t ng;
643 ae_int_t nh;
644 ae_vector x;
645 double f;
646 ae_vector fi;
647 ae_matrix j;
648 ae_bool needfij;
649 ae_bool needfi;
650 ae_bool xupdated;
651 rcommstate rstate;
652 rcommstate rstateaul;
653 ae_vector scaledbndl;
654 ae_vector scaledbndu;
655 ae_matrix scaledcleic;
656 ae_vector xc;
657 ae_vector xstart;
658 ae_vector xbase;
659 ae_vector fbase;
660 ae_vector dfbase;
661 ae_vector fm2;
662 ae_vector fm1;
663 ae_vector fp1;
664 ae_vector fp2;
665 ae_vector dfm1;
666 ae_vector dfp1;
667 ae_vector bufd;
668 ae_vector bufc;
669 ae_vector tmp0;
670 ae_matrix bufw;
671 ae_matrix bufz;
672 ae_vector xk;
673 ae_vector xk1;
674 ae_vector gk;
675 ae_vector gk1;
676 double gammak;
677 ae_bool xkpresent;
678 minlbfgsstate auloptimizer;
679 minlbfgsreport aulreport;
680 ae_vector nubc;
681 ae_vector nulc;
682 ae_vector nunlc;
683 ae_int_t repinneriterationscount;
684 ae_int_t repouteriterationscount;
685 ae_int_t repnfev;
686 ae_int_t repvaridx;
687 ae_int_t repfuncidx;
688 ae_int_t repterminationtype;
689 ae_int_t repdbgphase0its;
691typedef struct
692{
693 ae_int_t iterationscount;
694 ae_int_t nfev;
695 ae_int_t varidx;
696 ae_int_t funcidx;
697 ae_int_t terminationtype;
698 ae_int_t dbgphase0its;
700typedef struct
701{
702 ae_int_t nmain;
703 double epsg;
704 double epsf;
705 double epsx;
706 ae_int_t maxits;
707 ae_bool xrep;
708 double stpmax;
709 double diffstep;
710 ae_vector s;
711 ae_int_t prectype;
712 ae_vector diagh;
713 ae_vector x;
714 double f;
715 ae_vector g;
716 ae_bool needf;
717 ae_bool needfg;
718 ae_bool xupdated;
719 ae_bool userterminationneeded;
720 double teststep;
721 rcommstate rstate;
722 ae_vector xc;
723 ae_vector ugc;
724 ae_vector cgc;
725 ae_vector xn;
726 ae_vector ugn;
727 ae_vector cgn;
728 ae_vector xp;
729 double fc;
730 double fn;
731 double fp;
732 ae_vector d;
733 double lastscaledgoodstep;
734 ae_vector hasbndl;
735 ae_vector hasbndu;
736 ae_vector bndl;
737 ae_vector bndu;
738 ae_int_t repiterationscount;
739 ae_int_t repnfev;
740 ae_int_t repvaridx;
741 ae_int_t repterminationtype;
742 ae_vector xstart;
743 snnlssolver solver;
744 double fbase;
745 double fm2;
746 double fm1;
747 double fp1;
748 double fp2;
749 double xm1;
750 double xp1;
751 double gm1;
752 double gp1;
753 ae_vector tmpprec;
754 ae_vector tmp0;
755 ae_int_t nfev;
756 ae_int_t mcstage;
757 double stp;
758 double curstpmax;
759 ae_vector work;
760 linminstate lstate;
761 double trimthreshold;
762 ae_int_t nonmonotoniccnt;
763 ae_matrix bufyk;
764 ae_matrix bufsk;
765 ae_vector bufrho;
766 ae_vector buftheta;
767 ae_int_t bufsize;
768} minbcstate;
769typedef struct
770{
771 ae_int_t iterationscount;
772 ae_int_t nfev;
773 ae_int_t varidx;
774 ae_int_t terminationtype;
776typedef struct
777{
778 double fc;
779 double fn;
780 ae_vector xc;
781 ae_vector xn;
782 ae_vector x0;
783 ae_vector gc;
784 ae_vector d;
785 ae_matrix uh;
786 ae_matrix ch;
787 ae_matrix rk;
788 ae_vector invutc;
789 ae_vector tmp0;
790 ae_vector tmpidx;
791 ae_vector tmpd;
792 ae_vector tmpc;
793 ae_vector tmplambdas;
794 ae_matrix tmpc2;
795 ae_vector tmpb;
796 snnlssolver nnls;
797} minnsqp;
798typedef struct
799{
800 ae_int_t solvertype;
801 ae_int_t n;
802 double epsx;
803 ae_int_t maxits;
804 ae_bool xrep;
805 double diffstep;
806 ae_vector s;
807 ae_vector bndl;
808 ae_vector bndu;
809 ae_vector hasbndl;
810 ae_vector hasbndu;
811 ae_int_t nec;
812 ae_int_t nic;
813 ae_matrix cleic;
814 ae_int_t ng;
815 ae_int_t nh;
816 ae_vector x;
817 double f;
818 ae_vector fi;
819 ae_matrix j;
820 ae_bool needfij;
821 ae_bool needfi;
822 ae_bool xupdated;
823 rcommstate rstate;
824 rcommstate rstateags;
825 hqrndstate agsrs;
826 double agsradius;
827 ae_int_t agssamplesize;
828 double agsraddecay;
829 double agsalphadecay;
830 double agsdecrease;
831 double agsinitstp;
832 double agsstattold;
833 double agsshortstpabs;
834 double agsshortstprel;
835 double agsshortf;
836 ae_int_t agsshortlimit;
837 double agsrhononlinear;
838 ae_int_t agsminupdate;
839 ae_int_t agsmaxraddecays;
840 ae_int_t agsmaxbacktrack;
841 ae_int_t agsmaxbacktracknonfull;
842 double agspenaltylevel;
843 double agspenaltyincrease;
844 ae_vector xstart;
845 ae_vector xc;
846 ae_vector xn;
847 ae_vector grs;
848 ae_vector d;
849 ae_vector colmax;
850 ae_vector diagh;
851 ae_vector signmin;
852 ae_vector signmax;
853 ae_bool userterminationneeded;
854 ae_vector scaledbndl;
855 ae_vector scaledbndu;
856 ae_matrix scaledcleic;
857 ae_vector rholinear;
858 ae_matrix samplex;
859 ae_matrix samplegm;
860 ae_matrix samplegmbc;
861 ae_vector samplef;
862 ae_vector samplef0;
863 minnsqp nsqp;
864 ae_vector tmp0;
865 ae_vector tmp1;
866 ae_matrix tmp2;
867 ae_vector tmp3;
868 ae_vector xbase;
869 ae_vector fp;
870 ae_vector fm;
871 ae_int_t repinneriterationscount;
872 ae_int_t repouteriterationscount;
873 ae_int_t repnfev;
874 ae_int_t repvaridx;
875 ae_int_t repfuncidx;
876 ae_int_t repterminationtype;
877 double replcerr;
878 double repnlcerr;
879 ae_int_t dbgncholesky;
880} minnsstate;
881typedef struct
882{
883 ae_int_t iterationscount;
884 ae_int_t nfev;
885 double cerr;
886 double lcerr;
887 double nlcerr;
888 ae_int_t terminationtype;
889 ae_int_t varidx;
890 ae_int_t funcidx;
892typedef struct
893{
894 ae_int_t n;
895 double epsg;
896 double epsf;
897 double epsx;
898 ae_int_t maxits;
899 ae_bool xrep;
900 double stpmax;
901 ae_int_t cgtype;
902 ae_int_t k;
903 ae_int_t nfev;
904 ae_int_t mcstage;
905 ae_vector bndl;
906 ae_vector bndu;
907 ae_int_t curalgo;
908 ae_int_t acount;
909 double mu;
910 double finit;
911 double dginit;
912 ae_vector ak;
913 ae_vector xk;
914 ae_vector dk;
915 ae_vector an;
916 ae_vector xn;
917 ae_vector dn;
918 ae_vector d;
919 double fold;
920 double stp;
921 ae_vector work;
922 ae_vector yk;
923 ae_vector gc;
924 double laststep;
925 ae_vector x;
926 double f;
927 ae_vector g;
928 ae_bool needfg;
929 ae_bool xupdated;
930 rcommstate rstate;
931 ae_int_t repiterationscount;
932 ae_int_t repnfev;
933 ae_int_t repterminationtype;
934 ae_int_t debugrestartscount;
935 linminstate lstate;
936 double betahs;
937 double betady;
939typedef struct
940{
941 ae_int_t iterationscount;
942 ae_int_t nfev;
943 ae_int_t terminationtype;
944 ae_int_t activeconstraints;
946typedef struct
947{
948 ae_int_t n;
949 ae_int_t m;
950 double stpmax;
951 ae_int_t modelage;
952 ae_int_t maxmodelage;
953 ae_bool hasfi;
954 double epsx;
955 ae_vector x;
956 double f;
957 ae_vector fi;
958 ae_bool needf;
959 ae_bool needfi;
960 double fbase;
961 ae_vector modeldiag;
962 ae_vector xbase;
963 ae_vector fibase;
964 ae_vector bndl;
965 ae_vector bndu;
966 ae_vector havebndl;
967 ae_vector havebndu;
968 ae_vector s;
969 rcommstate rstate;
970 ae_vector xdir;
971 ae_vector choleskybuf;
972 ae_vector tmp0;
973 ae_vector tmpct;
974 double actualdecrease;
975 double predicteddecrease;
976 minqpstate qpstate;
977 minqpreport qprep;
978 sparsematrix tmpsp;
980typedef struct
981{
982 ae_int_t n;
983 ae_int_t m;
984 double diffstep;
985 double epsx;
986 ae_int_t maxits;
987 ae_bool xrep;
988 double stpmax;
989 ae_int_t maxmodelage;
990 ae_bool makeadditers;
991 ae_vector x;
992 double f;
993 ae_vector fi;
994 ae_matrix j;
995 ae_matrix h;
996 ae_vector g;
997 ae_bool needf;
998 ae_bool needfg;
999 ae_bool needfgh;
1000 ae_bool needfij;
1001 ae_bool needfi;
1002 ae_bool xupdated;
1003 ae_bool userterminationneeded;
1004 ae_int_t algomode;
1005 ae_bool hasf;
1006 ae_bool hasfi;
1007 ae_bool hasg;
1008 ae_vector xbase;
1009 double fbase;
1010 ae_vector fibase;
1011 ae_vector gbase;
1012 ae_matrix quadraticmodel;
1013 ae_vector bndl;
1014 ae_vector bndu;
1015 ae_vector havebndl;
1016 ae_vector havebndu;
1017 ae_vector s;
1018 ae_matrix cleic;
1019 ae_int_t nec;
1020 ae_int_t nic;
1021 double lambdav;
1022 double nu;
1023 ae_int_t modelage;
1024 ae_vector xnew;
1025 ae_vector xdir;
1026 ae_vector deltax;
1027 ae_vector deltaf;
1028 ae_bool deltaxready;
1029 ae_bool deltafready;
1030 double teststep;
1031 ae_int_t repiterationscount;
1032 ae_int_t repterminationtype;
1033 ae_int_t repfuncidx;
1034 ae_int_t repvaridx;
1035 ae_int_t repnfunc;
1036 ae_int_t repnjac;
1037 ae_int_t repngrad;
1038 ae_int_t repnhess;
1039 ae_int_t repncholesky;
1040 rcommstate rstate;
1041 ae_vector choleskybuf;
1042 ae_vector tmp0;
1043 double actualdecrease;
1044 double predicteddecrease;
1045 double xm1;
1046 double xp1;
1047 ae_vector fm1;
1048 ae_vector fp1;
1049 ae_vector fc1;
1050 ae_vector gm1;
1051 ae_vector gp1;
1052 ae_vector gc1;
1053 minlbfgsstate internalstate;
1054 minlbfgsreport internalrep;
1055 minqpstate qpstate;
1056 minqpreport qprep;
1057 minlmstepfinder finderstate;
1058} minlmstate;
1059typedef struct
1060{
1061 ae_int_t iterationscount;
1062 ae_int_t terminationtype;
1063 ae_int_t funcidx;
1064 ae_int_t varidx;
1065 ae_int_t nfunc;
1066 ae_int_t njac;
1067 ae_int_t ngrad;
1068 ae_int_t nhess;
1069 ae_int_t ncholesky;
1070} minlmreport;
1071
1072}
1073
1075//
1076// THIS SECTION CONTAINS C++ INTERFACE
1077//
1079namespace alglib
1080{
1081
1082
1083
1084
1085
1086
1087
1088
1089
1090
1091
1092/*************************************************************************
1093
1094*************************************************************************/
1096{
1097public:
1100 _minlbfgsstate_owner& operator=(const _minlbfgsstate_owner &rhs);
1101 virtual ~_minlbfgsstate_owner();
1103 alglib_impl::minlbfgsstate* c_ptr() const;
1104protected:
1106};
1108{
1109public:
1110 minlbfgsstate();
1111 minlbfgsstate(const minlbfgsstate &rhs);
1112 minlbfgsstate& operator=(const minlbfgsstate &rhs);
1113 virtual ~minlbfgsstate();
1114 ae_bool &needf;
1115 ae_bool &needfg;
1116 ae_bool &xupdated;
1117 double &f;
1118 real_1d_array g;
1119 real_1d_array x;
1120
1121};
1122
1123
/*************************************************************************
This structure stores optimization report:
* IterationsCount     total number of inner iterations
* NFEV                number of gradient evaluations
* TerminationType     termination type (see below)

TERMINATION CODES

TerminationType field contains completion code, which can be:
  -8    internal integrity control detected infinite or NAN values in
        function/gradient. Abnormal termination signalled.
  -7    gradient verification failed.
        See MinLBFGSSetGradientCheck() for more information.
   1    relative function improvement is no more than EpsF.
   2    relative step is no more than EpsX.
   4    gradient norm is no more than EpsG
   5    MaxIts steps was taken
   7    stopping conditions are too stringent,
        further improvement is impossible,
        X contains best point found so far.
   8    terminated by user who called minlbfgsrequesttermination().
        X contains point which was "current accepted" when termination
        request was submitted.

Other fields of this structure are not documented and should not be used!
*************************************************************************/
1151{
1152public:
1155 _minlbfgsreport_owner& operator=(const _minlbfgsreport_owner &rhs);
1156 virtual ~_minlbfgsreport_owner();
1158 alglib_impl::minlbfgsreport* c_ptr() const;
1159protected:
1161};
1163{
1164public:
1166 minlbfgsreport(const minlbfgsreport &rhs);
1167 minlbfgsreport& operator=(const minlbfgsreport &rhs);
1168 virtual ~minlbfgsreport();
1169 ae_int_t &iterationscount;
1170 ae_int_t &nfev;
1171 ae_int_t &varidx;
1172 ae_int_t &terminationtype;
1173
1174};
1175
1176
1177
1178
1179
1180/*************************************************************************
1181This object stores state of the nonlinear CG optimizer.
1182
1183You should use ALGLIB functions to work with this object.
1184*************************************************************************/
1186{
1187public:
1190 _mincgstate_owner& operator=(const _mincgstate_owner &rhs);
1191 virtual ~_mincgstate_owner();
1192 alglib_impl::mincgstate* c_ptr();
1193 alglib_impl::mincgstate* c_ptr() const;
1194protected:
1195 alglib_impl::mincgstate *p_struct;
1196};
1198{
1199public:
1200 mincgstate();
1201 mincgstate(const mincgstate &rhs);
1202 mincgstate& operator=(const mincgstate &rhs);
1203 virtual ~mincgstate();
1204 ae_bool &needf;
1205 ae_bool &needfg;
1206 ae_bool &xupdated;
1207 double &f;
1208 real_1d_array g;
1209 real_1d_array x;
1210
1211};
1212
1213
1214/*************************************************************************
1215This structure stores optimization report:
1216* IterationsCount total number of inner iterations
1217* NFEV number of gradient evaluations
1218* TerminationType termination type (see below)
1219
1220TERMINATION CODES
1221
1222TerminationType field contains completion code, which can be:
1223 -8 internal integrity control detected infinite or NAN values in
1224 function/gradient. Abnormal termination signalled.
1225 -7 gradient verification failed.
1226 See MinCGSetGradientCheck() for more information.
1227 1 relative function improvement is no more than EpsF.
1228 2 relative step is no more than EpsX.
1229 4 gradient norm is no more than EpsG
1230 5 MaxIts steps was taken
1231 7 stopping conditions are too stringent,
1232 further improvement is impossible,
1233 X contains best point found so far.
1234 8 terminated by user who called mincgrequesttermination(). X contains
1235 point which was "current accepted" when termination request was
1236 submitted.
1237
1238Other fields of this structure are not documented and should not be used!
1239*************************************************************************/
1241{
1242public:
1245 _mincgreport_owner& operator=(const _mincgreport_owner &rhs);
1246 virtual ~_mincgreport_owner();
1247 alglib_impl::mincgreport* c_ptr();
1248 alglib_impl::mincgreport* c_ptr() const;
1249protected:
1250 alglib_impl::mincgreport *p_struct;
1251};
1253{
1254public:
1255 mincgreport();
1256 mincgreport(const mincgreport &rhs);
1257 mincgreport& operator=(const mincgreport &rhs);
1258 virtual ~mincgreport();
1259 ae_int_t &iterationscount;
1260 ae_int_t &nfev;
1261 ae_int_t &varidx;
1262 ae_int_t &terminationtype;
1263
1264};
1265
1266/*************************************************************************
1267This object stores nonlinear optimizer state.
1268You should use functions provided by MinBLEIC subpackage to work with this
1269object
1270*************************************************************************/
1272{
1273public:
1276 _minbleicstate_owner& operator=(const _minbleicstate_owner &rhs);
1277 virtual ~_minbleicstate_owner();
1279 alglib_impl::minbleicstate* c_ptr() const;
1280protected:
1282};
1284{
1285public:
1286 minbleicstate();
1287 minbleicstate(const minbleicstate &rhs);
1288 minbleicstate& operator=(const minbleicstate &rhs);
1289 virtual ~minbleicstate();
1290 ae_bool &needf;
1291 ae_bool &needfg;
1292 ae_bool &xupdated;
1293 double &f;
1294 real_1d_array g;
1295 real_1d_array x;
1296
1297};
1298
1299
1300/*************************************************************************
1301This structure stores optimization report:
1302* IterationsCount number of iterations
1303* NFEV number of gradient evaluations
1304* TerminationType termination type (see below)
1305
1306TERMINATION CODES
1307
1308TerminationType field contains completion code, which can be:
1309 -8 internal integrity control detected infinite or NAN values in
1310 function/gradient. Abnormal termination signalled.
1311 -7 gradient verification failed.
1312 See MinBLEICSetGradientCheck() for more information.
1313 -3 inconsistent constraints. Feasible point is
1314 either nonexistent or too hard to find. Try to
1315 restart optimizer with better initial approximation
1316 1 relative function improvement is no more than EpsF.
1317 2 relative step is no more than EpsX.
1318 4 gradient norm is no more than EpsG
1319 5 MaxIts steps was taken
1320 7 stopping conditions are too stringent,
1321 further improvement is impossible,
1322 X contains best point found so far.
1323 8 terminated by user who called minbleicrequesttermination(). X contains
1324 point which was "current accepted" when termination request was
1325 submitted.
1326
1327ADDITIONAL FIELDS
1328
1329There are additional fields which can be used for debugging:
1330* DebugEqErr error in the equality constraints (2-norm)
1331* DebugFS f, calculated at projection of initial point
1332 to the feasible set
1333* DebugFF f, calculated at the final point
1334* DebugDX |X_start-X_final|
1335*************************************************************************/
1337{
1338public:
1341 _minbleicreport_owner& operator=(const _minbleicreport_owner &rhs);
1342 virtual ~_minbleicreport_owner();
1344 alglib_impl::minbleicreport* c_ptr() const;
1345protected:
1347};
1349{
1350public:
1352 minbleicreport(const minbleicreport &rhs);
1353 minbleicreport& operator=(const minbleicreport &rhs);
1354 virtual ~minbleicreport();
1355 ae_int_t &iterationscount;
1356 ae_int_t &nfev;
1357 ae_int_t &varidx;
1358 ae_int_t &terminationtype;
1359 double &debugeqerr;
1360 double &debugfs;
1361 double &debugff;
1362 double &debugdx;
1363 ae_int_t &debugfeasqpits;
1364 ae_int_t &debugfeasgpaits;
1365 ae_int_t &inneriterationscount;
1366 ae_int_t &outeriterationscount;
1367
1368};
1369
1370
1371
1372/*************************************************************************
1373This object stores nonlinear optimizer state.
1374You should use functions provided by MinQP subpackage to work with this
1375object
1376*************************************************************************/
1378{
1379public:
1382 _minqpstate_owner& operator=(const _minqpstate_owner &rhs);
1383 virtual ~_minqpstate_owner();
1384 alglib_impl::minqpstate* c_ptr();
1385 alglib_impl::minqpstate* c_ptr() const;
1386protected:
1387 alglib_impl::minqpstate *p_struct;
1388};
1390{
1391public:
1392 minqpstate();
1393 minqpstate(const minqpstate &rhs);
1394 minqpstate& operator=(const minqpstate &rhs);
1395 virtual ~minqpstate();
1396
1397};
1398
1399
1400/*************************************************************************
1401This structure stores optimization report:
1402* InnerIterationsCount number of inner iterations
1403* OuterIterationsCount number of outer iterations
1404* NCholesky number of Cholesky decomposition
1405* NMV number of matrix-vector products
1406 (only products calculated as part of iterative
1407 process are counted)
1408* TerminationType completion code (see below)
1409
1410Completion codes:
1411* -5 inappropriate solver was used:
1412 * QuickQP solver for problem with general linear constraints (dense/sparse)
1413* -4 BLEIC-QP or QuickQP solver found unconstrained direction
1414 of negative curvature (function is unbounded from
1415 below even under constraints), no meaningful
1416 minimum can be found.
1417* -3 inconsistent constraints (or, maybe, feasible point is
1418 too hard to find). If you are sure that constraints are feasible,
1419 try to restart optimizer with better initial approximation.
1420* -1 solver error
1421* 1..4 successful completion
1422* 5 MaxIts steps was taken
1423* 7 stopping conditions are too stringent,
1424 further improvement is impossible,
1425 X contains best point found so far.
1426*************************************************************************/
1428{
1429public:
1432 _minqpreport_owner& operator=(const _minqpreport_owner &rhs);
1433 virtual ~_minqpreport_owner();
1434 alglib_impl::minqpreport* c_ptr();
1435 alglib_impl::minqpreport* c_ptr() const;
1436protected:
1437 alglib_impl::minqpreport *p_struct;
1438};
1440{
1441public:
1442 minqpreport();
1443 minqpreport(const minqpreport &rhs);
1444 minqpreport& operator=(const minqpreport &rhs);
1445 virtual ~minqpreport();
1446 ae_int_t &inneriterationscount;
1447 ae_int_t &outeriterationscount;
1448 ae_int_t &nmv;
1449 ae_int_t &ncholesky;
1450 ae_int_t &terminationtype;
1451
1452};
1453
1454/*************************************************************************
1455This object stores nonlinear optimizer state.
1456You should use functions provided by MinNLC subpackage to work with this
1457object
1458*************************************************************************/
1460{
1461public:
1464 _minnlcstate_owner& operator=(const _minnlcstate_owner &rhs);
1465 virtual ~_minnlcstate_owner();
1466 alglib_impl::minnlcstate* c_ptr();
1467 alglib_impl::minnlcstate* c_ptr() const;
1468protected:
1469 alglib_impl::minnlcstate *p_struct;
1470};
1472{
1473public:
1474 minnlcstate();
1475 minnlcstate(const minnlcstate &rhs);
1476 minnlcstate& operator=(const minnlcstate &rhs);
1477 virtual ~minnlcstate();
1478 ae_bool &needfi;
1479 ae_bool &needfij;
1480 ae_bool &xupdated;
1481 double &f;
1482 real_1d_array fi;
1483 real_2d_array j;
1484 real_1d_array x;
1485
1486};
1487
1488
1489/*************************************************************************
1490This structure stores optimization report:
1491* IterationsCount total number of inner iterations
1492* NFEV number of gradient evaluations
1493* TerminationType termination type (see below)
1494
1495TERMINATION CODES
1496
1497TerminationType field contains completion code, which can be:
1498 -8 internal integrity control detected infinite or NAN values in
1499 function/gradient. Abnormal termination signalled.
1500 -7 gradient verification failed.
1501 See MinNLCSetGradientCheck() for more information.
1502 1 relative function improvement is no more than EpsF.
1503 2 relative step is no more than EpsX.
1504 4 gradient norm is no more than EpsG
1505 5 MaxIts steps was taken
1506 7 stopping conditions are too stringent,
1507 further improvement is impossible,
1508 X contains best point found so far.
1509
1510Other fields of this structure are not documented and should not be used!
1511*************************************************************************/
1513{
1514public:
1517 _minnlcreport_owner& operator=(const _minnlcreport_owner &rhs);
1518 virtual ~_minnlcreport_owner();
1520 alglib_impl::minnlcreport* c_ptr() const;
1521protected:
1522 alglib_impl::minnlcreport *p_struct;
1523};
1525{
1526public:
1527 minnlcreport();
1528 minnlcreport(const minnlcreport &rhs);
1529 minnlcreport& operator=(const minnlcreport &rhs);
1530 virtual ~minnlcreport();
1531 ae_int_t &iterationscount;
1532 ae_int_t &nfev;
1533 ae_int_t &varidx;
1534 ae_int_t &funcidx;
1535 ae_int_t &terminationtype;
1536 ae_int_t &dbgphase0its;
1537
1538};
1539
1540/*************************************************************************
1541This object stores nonlinear optimizer state.
1542You should use functions provided by MinBC subpackage to work with this
1543object
1544*************************************************************************/
1546{
1547public:
1550 _minbcstate_owner& operator=(const _minbcstate_owner &rhs);
1551 virtual ~_minbcstate_owner();
1552 alglib_impl::minbcstate* c_ptr();
1553 alglib_impl::minbcstate* c_ptr() const;
1554protected:
1555 alglib_impl::minbcstate *p_struct;
1556};
1558{
1559public:
1560 minbcstate();
1561 minbcstate(const minbcstate &rhs);
1562 minbcstate& operator=(const minbcstate &rhs);
1563 virtual ~minbcstate();
1564 ae_bool &needf;
1565 ae_bool &needfg;
1566 ae_bool &xupdated;
1567 double &f;
1568 real_1d_array g;
1569 real_1d_array x;
1570
1571};
1572
1573
1574/*************************************************************************
1575This structure stores optimization report:
1576* IterationsCount number of iterations
1577* NFEV number of gradient evaluations
1578* TerminationType termination type (see below)
1579
1580TERMINATION CODES
1581
1582TerminationType field contains completion code, which can be:
1583 -8 internal integrity control detected infinite or NAN values in
1584 function/gradient. Abnormal termination signalled.
1585 -7 gradient verification failed.
1586 See MinBCSetGradientCheck() for more information.
1587 -3 inconsistent constraints.
1588 1 relative function improvement is no more than EpsF.
1589 2 relative step is no more than EpsX.
1590 4 gradient norm is no more than EpsG
1591 5 MaxIts steps was taken
1592 7 stopping conditions are too stringent,
1593 further improvement is impossible,
1594 X contains best point found so far.
1595 8 terminated by user who called minbcrequesttermination(). X contains
1596 point which was "current accepted" when termination request was
1597 submitted.
1598
1599ADDITIONAL FIELDS
1600
1601There are additional fields which can be used for debugging:
1602* DebugEqErr error in the equality constraints (2-norm)
1603* DebugFS f, calculated at projection of initial point
1604 to the feasible set
1605* DebugFF f, calculated at the final point
1606* DebugDX |X_start-X_final|
1607*************************************************************************/
1609{
1610public:
1613 _minbcreport_owner& operator=(const _minbcreport_owner &rhs);
1614 virtual ~_minbcreport_owner();
1615 alglib_impl::minbcreport* c_ptr();
1616 alglib_impl::minbcreport* c_ptr() const;
1617protected:
1618 alglib_impl::minbcreport *p_struct;
1619};
1621{
1622public:
1623 minbcreport();
1624 minbcreport(const minbcreport &rhs);
1625 minbcreport& operator=(const minbcreport &rhs);
1626 virtual ~minbcreport();
1627 ae_int_t &iterationscount;
1628 ae_int_t &nfev;
1629 ae_int_t &varidx;
1630 ae_int_t &terminationtype;
1631
1632};
1633
1634/*************************************************************************
1635This object stores nonlinear optimizer state.
1636You should use functions provided by MinNS subpackage to work with this
1637object
1638*************************************************************************/
1640{
1641public:
1644 _minnsstate_owner& operator=(const _minnsstate_owner &rhs);
1645 virtual ~_minnsstate_owner();
1646 alglib_impl::minnsstate* c_ptr();
1647 alglib_impl::minnsstate* c_ptr() const;
1648protected:
1649 alglib_impl::minnsstate *p_struct;
1650};
1652{
1653public:
1654 minnsstate();
1655 minnsstate(const minnsstate &rhs);
1656 minnsstate& operator=(const minnsstate &rhs);
1657 virtual ~minnsstate();
1658 ae_bool &needfi;
1659 ae_bool &needfij;
1660 ae_bool &xupdated;
1661 double &f;
1662 real_1d_array fi;
1663 real_2d_array j;
1664 real_1d_array x;
1665
1666};
1667
1668
1669/*************************************************************************
1670This structure stores optimization report:
1671* IterationsCount total number of inner iterations
1672* NFEV number of gradient evaluations
1673* TerminationType termination type (see below)
1674* CErr maximum violation of all types of constraints
1675* LCErr maximum violation of linear constraints
1676* NLCErr maximum violation of nonlinear constraints
1677
1678TERMINATION CODES
1679
1680TerminationType field contains completion code, which can be:
1681 -8 internal integrity control detected infinite or NAN values in
1682 function/gradient. Abnormal termination signalled.
1683 -3 box constraints are inconsistent
1684 -1 inconsistent parameters were passed:
1685 * penalty parameter for minnssetalgoags() is zero,
1686 but we have nonlinear constraints set by minnssetnlc()
1687 2 sampling radius decreased below epsx
1688 5 MaxIts steps was taken
1689 7 stopping conditions are too stringent,
1690 further improvement is impossible,
1691 X contains best point found so far.
1692 8 User requested termination via MinNSRequestTermination()
1693
1694Other fields of this structure are not documented and should not be used!
1695*************************************************************************/
1697{
1698public:
1701 _minnsreport_owner& operator=(const _minnsreport_owner &rhs);
1702 virtual ~_minnsreport_owner();
1703 alglib_impl::minnsreport* c_ptr();
1704 alglib_impl::minnsreport* c_ptr() const;
1705protected:
1706 alglib_impl::minnsreport *p_struct;
1707};
1709{
1710public:
1711 minnsreport();
1712 minnsreport(const minnsreport &rhs);
1713 minnsreport& operator=(const minnsreport &rhs);
1714 virtual ~minnsreport();
1715 ae_int_t &iterationscount;
1716 ae_int_t &nfev;
1717 double &cerr;
1718 double &lcerr;
1719 double &nlcerr;
1720 ae_int_t &terminationtype;
1721 ae_int_t &varidx;
1722 ae_int_t &funcidx;
1723
1724};
1725
1726/*************************************************************************
1727
1728*************************************************************************/
1730{
1731public:
1734 _minasastate_owner& operator=(const _minasastate_owner &rhs);
1735 virtual ~_minasastate_owner();
1736 alglib_impl::minasastate* c_ptr();
1737 alglib_impl::minasastate* c_ptr() const;
1738protected:
1739 alglib_impl::minasastate *p_struct;
1740};
1742{
1743public:
1744 minasastate();
1745 minasastate(const minasastate &rhs);
1746 minasastate& operator=(const minasastate &rhs);
1747 virtual ~minasastate();
1748 ae_bool &needfg;
1749 ae_bool &xupdated;
1750 double &f;
1751 real_1d_array g;
1752 real_1d_array x;
1753
1754};
1755
1756
1757/*************************************************************************
1758
1759*************************************************************************/
1761{
1762public:
1765 _minasareport_owner& operator=(const _minasareport_owner &rhs);
1766 virtual ~_minasareport_owner();
1768 alglib_impl::minasareport* c_ptr() const;
1769protected:
1770 alglib_impl::minasareport *p_struct;
1771};
1773{
1774public:
1775 minasareport();
1776 minasareport(const minasareport &rhs);
1777 minasareport& operator=(const minasareport &rhs);
1778 virtual ~minasareport();
1779 ae_int_t &iterationscount;
1780 ae_int_t &nfev;
1781 ae_int_t &terminationtype;
1782 ae_int_t &activeconstraints;
1783
1784};
1785
1786/*************************************************************************
1787Levenberg-Marquardt optimizer.
1788
1789This structure should be created using one of the MinLMCreate???()
1790functions. You should not access its fields directly; use ALGLIB functions
1791to work with it.
1792*************************************************************************/
1794{
1795public:
1798 _minlmstate_owner& operator=(const _minlmstate_owner &rhs);
1799 virtual ~_minlmstate_owner();
1800 alglib_impl::minlmstate* c_ptr();
1801 alglib_impl::minlmstate* c_ptr() const;
1802protected:
1803 alglib_impl::minlmstate *p_struct;
1804};
1806{
1807public:
1808 minlmstate();
1809 minlmstate(const minlmstate &rhs);
1810 minlmstate& operator=(const minlmstate &rhs);
1811 virtual ~minlmstate();
1812 ae_bool &needf;
1813 ae_bool &needfg;
1814 ae_bool &needfgh;
1815 ae_bool &needfi;
1816 ae_bool &needfij;
1817 ae_bool &xupdated;
1818 double &f;
1819 real_1d_array fi;
1820 real_1d_array g;
1821 real_2d_array h;
1822 real_2d_array j;
1823 real_1d_array x;
1824
1825};
1826
1827
1828/*************************************************************************
1829Optimization report, filled by MinLMResults() function
1830
1831FIELDS:
1832* TerminationType, completion code:
1833 * -8 optimizer detected NAN/INF values either in the function itself,
1834 or in its Jacobian
1835 * -7 derivative correctness check failed;
1836 see rep.funcidx, rep.varidx for
1837 more information.
1838 * -5 inappropriate solver was used:
1839 * solver created with minlmcreatefgh() used on problem with
1840 general linear constraints (set with minlmsetlc() call).
1841 * -3 constraints are inconsistent
1842 * 2 relative step is no more than EpsX.
1843 * 5 MaxIts steps was taken
1844 * 7 stopping conditions are too stringent,
1845 further improvement is impossible
1846 * 8 terminated by user who called MinLMRequestTermination().
1847 X contains point which was "current accepted" when termination
1848 request was submitted.
1849* IterationsCount, contains iterations count
1850* NFunc, number of function calculations
1851* NJac, number of Jacobi matrix calculations
1852* NGrad, number of gradient calculations
1853* NHess, number of Hessian calculations
1854* NCholesky, number of Cholesky decomposition calculations
1855*************************************************************************/
1857{
1858public:
1861 _minlmreport_owner& operator=(const _minlmreport_owner &rhs);
1862 virtual ~_minlmreport_owner();
1863 alglib_impl::minlmreport* c_ptr();
1864 alglib_impl::minlmreport* c_ptr() const;
1865protected:
1866 alglib_impl::minlmreport *p_struct;
1867};
1869{
1870public:
1871 minlmreport();
1872 minlmreport(const minlmreport &rhs);
1873 minlmreport& operator=(const minlmreport &rhs);
1874 virtual ~minlmreport();
1875 ae_int_t &iterationscount;
1876 ae_int_t &terminationtype;
1877 ae_int_t &funcidx;
1878 ae_int_t &varidx;
1879 ae_int_t &nfunc;
1880 ae_int_t &njac;
1881 ae_int_t &ngrad;
1882 ae_int_t &nhess;
1883 ae_int_t &ncholesky;
1884
1885};
1886
1887
1888
1889
1890
1891
1892
1893
1894
1895
1896
1897/*************************************************************************
1898 LIMITED MEMORY BFGS METHOD FOR LARGE SCALE OPTIMIZATION
1899
1900DESCRIPTION:
1901The subroutine minimizes function F(x) of N arguments by using a quasi-
1902Newton method (LBFGS scheme) which is optimized to use a minimum amount
1903of memory.
1904The subroutine generates the approximation of an inverse Hessian matrix by
1905using information about the last M steps of the algorithm (instead of N).
1906It lessens a required amount of memory from a value of order N^2 to a
1907value of order 2*N*M.
1908
1909
1910REQUIREMENTS:
1911Algorithm will request following information during its operation:
1912* function value F and its gradient G (simultaneously) at given point X
1913
1914
1915USAGE:
19161. User initializes algorithm state with MinLBFGSCreate() call
19172. User tunes solver parameters with MinLBFGSSetCond(), MinLBFGSSetStpMax()
1918 and other functions
19193. User calls MinLBFGSOptimize() function which takes algorithm state and
1920 pointer (delegate, etc.) to callback function which calculates F/G.
19214. User calls MinLBFGSResults() to get solution
19225. Optionally user may call MinLBFGSRestartFrom() to solve another problem
1923 with same N/M but another starting point and/or another function.
1924 MinLBFGSRestartFrom() allows to reuse already initialized structure.
1925
1926
1927INPUT PARAMETERS:
1928 N - problem dimension. N>0
1929 M - number of corrections in the BFGS scheme of Hessian
1930 approximation update. Recommended value: 3<=M<=7. The smaller
1931 value causes worse convergence, the bigger will not cause a
1932 considerably better convergence, but will cause a fall in the
1933 performance. M<=N.
1934 X - initial solution approximation, array[0..N-1].
1935
1936
1937OUTPUT PARAMETERS:
1938 State - structure which stores algorithm state
1939
1940
1941NOTES:
19421. you may tune stopping conditions with MinLBFGSSetCond() function
19432. if target function contains exp() or other fast growing functions, and
1944 optimization algorithm makes too large steps which leads to overflow,
1945 use MinLBFGSSetStpMax() function to bound algorithm's steps. However,
1946 L-BFGS rarely needs such a tuning.
1947
1948
1949 -- ALGLIB --
1950 Copyright 02.04.2010 by Bochkanov Sergey
1951*************************************************************************/
1952void minlbfgscreate(const ae_int_t n, const ae_int_t m, const real_1d_array &x, minlbfgsstate &state);
1953void minlbfgscreate(const ae_int_t m, const real_1d_array &x, minlbfgsstate &state);
1954
1955
1956/*************************************************************************
1957The subroutine is finite difference variant of MinLBFGSCreate(). It uses
1958finite differences in order to differentiate target function.
1959
1960Description below contains information which is specific to this function
1961only. We recommend to read comments on MinLBFGSCreate() in order to get
1962more information about creation of LBFGS optimizer.
1963
1964INPUT PARAMETERS:
1965 N - problem dimension, N>0:
1966 * if given, only leading N elements of X are used
1967 * if not given, automatically determined from size of X
1968 M - number of corrections in the BFGS scheme of Hessian
1969 approximation update. Recommended value: 3<=M<=7. The smaller
1970 value causes worse convergence, the bigger will not cause a
1971 considerably better convergence, but will cause a fall in the
1972 performance. M<=N.
1973 X - starting point, array[0..N-1].
1974 DiffStep- differentiation step, >0
1975
1976OUTPUT PARAMETERS:
1977 State - structure which stores algorithm state
1978
1979NOTES:
19801. algorithm uses 4-point central formula for differentiation.
19812. differentiation step along I-th axis is equal to DiffStep*S[I] where
1982 S[] is scaling vector which can be set by MinLBFGSSetScale() call.
19833. we recommend you to use moderate values of differentiation step. Too
1984 large step will result in too large truncation errors, while too small
1985 step will result in too large numerical errors. 1.0E-6 can be good
1986 value to start with.
19874. Numerical differentiation is very inefficient - one gradient
1988 calculation needs 4*N function evaluations. This function will work for
1989 any N - either small (1...10), moderate (10...100) or large (100...).
1990 However, performance penalty will be too severe for any N's except for
1991 small ones.
1992 We should also say that code which relies on numerical differentiation
1993 is less robust and precise. LBFGS needs exact gradient values.
1994 Imprecise gradient may slow down convergence, especially on highly
1995 nonlinear problems.
1996 Thus we recommend to use this function for fast prototyping on small-
1997 dimensional problems only, and to implement analytical gradient as soon
1998 as possible.
1999
2000 -- ALGLIB --
2001 Copyright 16.05.2011 by Bochkanov Sergey
2002*************************************************************************/
2003void minlbfgscreatef(const ae_int_t n, const ae_int_t m, const real_1d_array &x, const double diffstep, minlbfgsstate &state);
2004void minlbfgscreatef(const ae_int_t m, const real_1d_array &x, const double diffstep, minlbfgsstate &state);
2005
2006
2007/*************************************************************************
2008This function sets stopping conditions for L-BFGS optimization algorithm.
2009
2010INPUT PARAMETERS:
2011 State - structure which stores algorithm state
2012 EpsG - >=0
2013 The subroutine finishes its work if the condition
2014 |v|<EpsG is satisfied, where:
2015 * |.| means Euclidean norm
2016 * v - scaled gradient vector, v[i]=g[i]*s[i]
2017 * g - gradient
2018 * s - scaling coefficients set by MinLBFGSSetScale()
2019 EpsF - >=0
2020 The subroutine finishes its work if on k+1-th iteration
2021 the condition |F(k+1)-F(k)|<=EpsF*max{|F(k)|,|F(k+1)|,1}
2022 is satisfied.
2023 EpsX - >=0
2024 The subroutine finishes its work if on k+1-th iteration
2025 the condition |v|<=EpsX is fulfilled, where:
2026 * |.| means Euclidean norm
2027 * v - scaled step vector, v[i]=dx[i]/s[i]
2028 * dx - step vector, dx=X(k+1)-X(k)
2029 * s - scaling coefficients set by MinLBFGSSetScale()
2030 MaxIts - maximum number of iterations. If MaxIts=0, the number of
2031 iterations is unlimited.
2032
2033Passing EpsG=0, EpsF=0, EpsX=0 and MaxIts=0 (simultaneously) will lead to
2034automatic stopping criterion selection (small EpsX).
2035
2036 -- ALGLIB --
2037 Copyright 02.04.2010 by Bochkanov Sergey
2038*************************************************************************/
2039void minlbfgssetcond(const minlbfgsstate &state, const double epsg, const double epsf, const double epsx, const ae_int_t maxits);
2040
2041
2042/*************************************************************************
2043This function turns on/off reporting.
2044
2045INPUT PARAMETERS:
2046 State - structure which stores algorithm state
2047 NeedXRep- whether iteration reports are needed or not
2048
2049If NeedXRep is True, algorithm will call rep() callback function if it is
2050provided to MinLBFGSOptimize().
2051
2052
2053 -- ALGLIB --
2054 Copyright 02.04.2010 by Bochkanov Sergey
2055*************************************************************************/
2056void minlbfgssetxrep(const minlbfgsstate &state, const bool needxrep);
2057
2058
2059/*************************************************************************
2060This function sets maximum step length
2061
2062INPUT PARAMETERS:
2063 State - structure which stores algorithm state
2064 StpMax - maximum step length, >=0. Set StpMax to 0.0 (default), if
2065 you don't want to limit step length.
2066
2067Use this subroutine when you optimize target function which contains exp()
2068or other fast growing functions, and optimization algorithm makes too
2069large steps which leads to overflow. This function allows us to reject
2070steps that are too large (and therefore expose us to the possible
2071overflow) without actually calculating function value at the x+stp*d.
2072
2073 -- ALGLIB --
2074 Copyright 02.04.2010 by Bochkanov Sergey
2075*************************************************************************/
2076void minlbfgssetstpmax(const minlbfgsstate &state, const double stpmax);
2077
2078
2079/*************************************************************************
2080This function sets scaling coefficients for LBFGS optimizer.
2081
2082ALGLIB optimizers use scaling matrices to test stopping conditions (step
2083size and gradient are scaled before comparison with tolerances). Scale of
2084the I-th variable is a translation invariant measure of:
2085a) "how large" the variable is
2086b) how large the step should be to make significant changes in the function
2087
2088Scaling is also used by finite difference variant of the optimizer - step
2089along I-th axis is equal to DiffStep*S[I].
2090
2091In most optimizers (and in the LBFGS too) scaling is NOT a form of
2092preconditioning. It just affects stopping conditions. You should set
2093preconditioner by separate call to one of the MinLBFGSSetPrec...()
2094functions.
2095
2096There is special preconditioning mode, however, which uses scaling
2097coefficients to form diagonal preconditioning matrix. You can turn this
2098mode on, if you want. But you should understand that scaling is not the
2099same thing as preconditioning - these are two different, although related
2100forms of tuning solver.
2101
2102INPUT PARAMETERS:
2103 State - structure stores algorithm state
2104 S - array[N], non-zero scaling coefficients
2105 S[i] may be negative, sign doesn't matter.
2106
2107 -- ALGLIB --
2108 Copyright 14.01.2011 by Bochkanov Sergey
2109*************************************************************************/
2110void minlbfgssetscale(const minlbfgsstate &state, const real_1d_array &s);
2111
2112
2113/*************************************************************************
2114Modification of the preconditioner: default preconditioner (simple
2115scaling, same for all elements of X) is used.
2116
2117INPUT PARAMETERS:
2118 State - structure which stores algorithm state
2119
2120NOTE: you can change preconditioner "on the fly", during algorithm
2121iterations.
2122
2123 -- ALGLIB --
2124 Copyright 13.10.2010 by Bochkanov Sergey
2125*************************************************************************/
2126void minlbfgssetprecdefault(const minlbfgsstate &state);
2127
2128
2129/*************************************************************************
2130Modification of the preconditioner: Cholesky factorization of approximate
2131Hessian is used.
2132
2133INPUT PARAMETERS:
2134 State - structure which stores algorithm state
2135 P - triangular preconditioner, Cholesky factorization of
2136 the approximate Hessian. array[0..N-1,0..N-1],
2137 (if larger, only leading N elements are used).
2138 IsUpper - whether upper or lower triangle of P is given
2139 (other triangle is not referenced)
2140
2141After call to this function preconditioner is changed to P (P is copied
2142into the internal buffer).
2143
2144NOTE: you can change preconditioner "on the fly", during algorithm
2145iterations.
2146
2147NOTE 2: P should be nonsingular. Exception will be thrown otherwise.
2148
2149 -- ALGLIB --
2150 Copyright 13.10.2010 by Bochkanov Sergey
2151*************************************************************************/
2152void minlbfgssetpreccholesky(const minlbfgsstate &state, const real_2d_array &p, const bool isupper);
2153
2154
2155/*************************************************************************
2156Modification of the preconditioner: diagonal of approximate Hessian is
2157used.
2158
2159INPUT PARAMETERS:
2160 State - structure which stores algorithm state
2161 D - diagonal of the approximate Hessian, array[0..N-1],
2162 (if larger, only leading N elements are used).
2163
2164NOTE: you can change preconditioner "on the fly", during algorithm
2165iterations.
2166
2167NOTE 2: D[i] should be positive. Exception will be thrown otherwise.
2168
2169NOTE 3: you should pass diagonal of approximate Hessian - NOT ITS INVERSE.
2170
2171 -- ALGLIB --
2172 Copyright 13.10.2010 by Bochkanov Sergey
2173*************************************************************************/
2174void minlbfgssetprecdiag(const minlbfgsstate &state, const real_1d_array &d);
2175
2176
2177/*************************************************************************
2178Modification of the preconditioner: scale-based diagonal preconditioning.
2179
2180This preconditioning mode can be useful when you don't have approximate
2181diagonal of Hessian, but you know that your variables are badly scaled
2182(for example, one variable is in [1,10], and another in [1000,100000]),
2183and most part of the ill-conditioning comes from different scales of vars.
2184
2185In this case simple scale-based preconditioner, with H[i] = 1/(s[i]^2),
2186can greatly improve convergence.
2187
2188IMPORTANT: you should set scale of your variables with MinLBFGSSetScale()
2189call (before or after MinLBFGSSetPrecScale() call). Without knowledge of
2190the scale of your variables scale-based preconditioner will be just unit
2191matrix.
2192
2193INPUT PARAMETERS:
2194 State - structure which stores algorithm state
2195
2196 -- ALGLIB --
2197 Copyright 13.10.2010 by Bochkanov Sergey
2198*************************************************************************/
2199void minlbfgssetprecscale(const minlbfgsstate &state);
2200
2201
2202/*************************************************************************
2203This function provides reverse communication interface
2204Reverse communication interface is not documented or recommended to use.
2205See below for functions which provide better documented API
2206*************************************************************************/
2207bool minlbfgsiteration(const minlbfgsstate &state);
2208
2209
2210/*************************************************************************
2211This family of functions is used to launch iterations of nonlinear optimizer
2212
2213These functions accept following parameters:
2214 state - algorithm state
2215 func - callback which calculates function (or merit function)
2216 value func at given point x
2217 grad - callback which calculates function (or merit function)
2218 value func and gradient grad at given point x
2219 rep - optional callback which is called after each iteration
2220 can be NULL
2221 ptr - optional pointer which is passed to func/grad/hess/jac/rep
2222 can be NULL
2223
2224NOTES:
2225
22261. This function has two different implementations: one which uses exact
2227 (analytical) user-supplied gradient, and one which uses function value
2228 only and numerically differentiates function in order to obtain
2229 gradient.
2230
2231 Depending on the specific function used to create optimizer object
2232 (either MinLBFGSCreate() for analytical gradient or MinLBFGSCreateF()
2233 for numerical differentiation) you should choose appropriate variant of
2234 MinLBFGSOptimize() - one which accepts function AND gradient or one
2235 which accepts function ONLY.
2236
2237 Be careful to choose variant of MinLBFGSOptimize() which corresponds to
2238 your optimization scheme! Table below lists different combinations of
2239 callback (function/gradient) passed to MinLBFGSOptimize() and specific
2240 function used to create optimizer.
2241
2242
2243 | USER PASSED TO MinLBFGSOptimize()
2244 CREATED WITH | function only | function and gradient
2245 ------------------------------------------------------------
2246 MinLBFGSCreateF() | work FAIL
2247 MinLBFGSCreate() | FAIL work
2248
2249 Here "FAIL" denotes inappropriate combinations of optimizer creation
2250 function and MinLBFGSOptimize() version. Attempts to use such
2251 combination (for example, to create optimizer with MinLBFGSCreateF() and
2252 to pass gradient information to MinLBFGSOptimize()) will lead to exception
2253 being thrown. Either you did not pass gradient when it WAS needed or
2254 you passed gradient when it was NOT needed.
2255
2256 -- ALGLIB --
2257 Copyright 20.03.2009 by Bochkanov Sergey
2258
2259*************************************************************************/
2260void minlbfgsoptimize(minlbfgsstate &state,
2261 void (*func)(const real_1d_array &x, double &func, void *ptr),
2262 void (*rep)(const real_1d_array &x, double func, void *ptr) = NULL,
2263 void *ptr = NULL);
2264void minlbfgsoptimize(minlbfgsstate &state,
2265 void (*grad)(const real_1d_array &x, double &func, real_1d_array &grad, void *ptr),
2266 void (*rep)(const real_1d_array &x, double func, void *ptr) = NULL,
2267 void *ptr = NULL);
2268
2269
2270/*************************************************************************
2271L-BFGS algorithm results
2272
2273INPUT PARAMETERS:
2274 State - algorithm state
2275
2276OUTPUT PARAMETERS:
2277 X - array[0..N-1], solution
2278 Rep - optimization report:
2279 * Rep.TerminationType completion code:
2280 * -8 internal integrity control detected infinite
2281 or NAN values in function/gradient. Abnormal
2282 termination signalled.
2283 * -7 gradient verification failed.
2284 See MinLBFGSSetGradientCheck() for more information.
2285 * -2 rounding errors prevent further improvement.
2286 X contains best point found.
2287 * -1 incorrect parameters were specified
2288 * 1 relative function improvement is no more than
2289 EpsF.
2290 * 2 relative step is no more than EpsX.
2291 * 4 gradient norm is no more than EpsG
2292 * 5 MaxIts steps was taken
2293 * 7 stopping conditions are too stringent,
2294 further improvement is impossible
2295 * 8 terminated by user who called minlbfgsrequesttermination().
2296 X contains point which was "current accepted" when
2297 termination request was submitted.
2298 * Rep.IterationsCount contains iterations count
2299 * NFEV contains number of function calculations
2300
2301 -- ALGLIB --
2302 Copyright 02.04.2010 by Bochkanov Sergey
2303*************************************************************************/
2304void minlbfgsresults(const minlbfgsstate &state, real_1d_array &x, minlbfgsreport &rep);
2305
2306
2307/*************************************************************************
2308L-BFGS algorithm results
2309
2310Buffered implementation of MinLBFGSResults which uses pre-allocated buffer
2311to store X[]. If buffer size is too small, it resizes buffer. It is
2312intended to be used in the inner cycles of performance critical algorithms
2313where array reallocation penalty is too large to be ignored.
2314
2315 -- ALGLIB --
2316 Copyright 20.08.2010 by Bochkanov Sergey
2317*************************************************************************/
2318void minlbfgsresultsbuf(const minlbfgsstate &state, real_1d_array &x, minlbfgsreport &rep);
2319
2320
2321/*************************************************************************
2322This subroutine restarts LBFGS algorithm from new point. All optimization
2323parameters are left unchanged.
2324
2325This function allows to solve multiple optimization problems (which
2326must have same number of dimensions) without object reallocation penalty.
2327
2328INPUT PARAMETERS:
2329 State - structure used to store algorithm state
2330 X - new starting point.
2331
2332 -- ALGLIB --
2333 Copyright 30.07.2010 by Bochkanov Sergey
2334*************************************************************************/
2335void minlbfgsrestartfrom(const minlbfgsstate &state, const real_1d_array &x);
2336
2337
2338/*************************************************************************
2339This subroutine submits request for termination of running optimizer. It
2340should be called from user-supplied callback when user decides that it is
2341time to "smoothly" terminate optimization process. As result, optimizer
2342stops at point which was "current accepted" when termination request was
2343submitted and returns error code 8 (successful termination).
2344
2345INPUT PARAMETERS:
2346 State - optimizer structure
2347
2348NOTE: after request for termination optimizer may perform several
2349 additional calls to user-supplied callbacks. It does NOT guarantee
2350 to stop immediately - it just guarantees that these additional calls
2351 will be discarded later.
2352
2353NOTE: calling this function on optimizer which is NOT running will have no
2354 effect.
2355
2356NOTE: multiple calls to this function are possible. First call is counted,
2357 subsequent calls are silently ignored.
2358
2359 -- ALGLIB --
2360 Copyright 08.10.2014 by Bochkanov Sergey
2361*************************************************************************/
2362void minlbfgsrequesttermination(const minlbfgsstate &state);
2363
2364
2365/*************************************************************************
2366This subroutine turns on verification of the user-supplied analytic
2367gradient:
2368* user calls this subroutine before optimization begins
2369* MinLBFGSOptimize() is called
2370* prior to actual optimization, for each component of parameters being
2371 optimized X[i] algorithm performs following steps:
2372 * two trial steps are made to X[i]-TestStep*S[i] and X[i]+TestStep*S[i],
2373 where X[i] is i-th component of the initial point and S[i] is a scale
2374 of i-th parameter
2375 * if needed, steps are bounded with respect to constraints on X[]
2376 * F(X) is evaluated at these trial points
2377 * we perform one more evaluation in the middle point of the interval
2378 * we build cubic model using function values and derivatives at trial
2379 points and we compare its prediction with actual value in the middle
2380 point
2381 * in case difference between prediction and actual value is higher than
2382 some predetermined threshold, algorithm stops with completion code -7;
2383 Rep.VarIdx is set to index of the parameter with incorrect derivative.
2384* after verification is over, algorithm proceeds to the actual optimization.
2385
2386NOTE 1: verification needs N (parameters count) gradient evaluations. It
2387 is very costly and you should use it only for low dimensional
2388 problems, when you want to be sure that you've correctly
2389 calculated analytic derivatives. You should not use it in the
2390 production code (unless you want to check derivatives provided by
2391 some third party).
2392
2393NOTE 2: you should carefully choose TestStep. Value which is too large
2394 (so large that function behaviour is significantly non-cubic) will
2395 lead to false alarms. You may use different step for different
2396 parameters by means of setting scale with MinLBFGSSetScale().
2397
2398NOTE 3: this function may lead to false positives. In case it reports that
2399 I-th derivative was calculated incorrectly, you may decrease test
2400 step and try one more time - maybe your function changes too
2401 sharply and your step is too large for such rapidly changing
2402 function.
2403
2404INPUT PARAMETERS:
2405 State - structure used to store algorithm state
2406 TestStep - verification step:
2407 * TestStep=0 turns verification off
2408 * TestStep>0 activates verification
2409
2410 -- ALGLIB --
2411 Copyright 24.05.2012 by Bochkanov Sergey
2412*************************************************************************/
2413void minlbfgssetgradientcheck(const minlbfgsstate &state, const double teststep);
2414
2415
2416
2417
2418
2419/*************************************************************************
2420 NONLINEAR CONJUGATE GRADIENT METHOD
2421
2422DESCRIPTION:
2423The subroutine minimizes function F(x) of N arguments by using one of the
2424nonlinear conjugate gradient methods.
2425
2426These CG methods are globally convergent (even on non-convex functions) as
2427long as grad(f) is Lipschitz continuous in some neighborhood of the level set
2428L = { x : f(x)<=f(x0) }.
2429
2430
2431REQUIREMENTS:
2432Algorithm will request following information during its operation:
2433* function value F and its gradient G (simultaneously) at given point X
2434
2435
2436USAGE:
24371. User initializes algorithm state with MinCGCreate() call
24382. User tunes solver parameters with MinCGSetCond(), MinCGSetStpMax() and
2439 other functions
24403. User calls MinCGOptimize() function which takes algorithm state and
2441 pointer (delegate, etc.) to callback function which calculates F/G.
24424. User calls MinCGResults() to get solution
24435. Optionally, user may call MinCGRestartFrom() to solve another problem
2444 with same N but another starting point and/or another function.
2445 MinCGRestartFrom() allows to reuse already initialized structure.
2446
2447
2448INPUT PARAMETERS:
2449 N - problem dimension, N>0:
2450 * if given, only leading N elements of X are used
2451 * if not given, automatically determined from size of X
2452 X - starting point, array[0..N-1].
2453
2454OUTPUT PARAMETERS:
2455 State - structure which stores algorithm state
2456
2457 -- ALGLIB --
2458 Copyright 25.03.2010 by Bochkanov Sergey
2459*************************************************************************/
2460void mincgcreate(const ae_int_t n, const real_1d_array &x, mincgstate &state);
2461void mincgcreate(const real_1d_array &x, mincgstate &state);
2462
2463
2464/*************************************************************************
2465The subroutine is finite difference variant of MinCGCreate(). It uses
2466finite differences in order to differentiate target function.
2467
2468Description below contains information which is specific to this function
2469only. We recommend to read comments on MinCGCreate() in order to get more
2470information about creation of CG optimizer.
2471
2472INPUT PARAMETERS:
2473 N - problem dimension, N>0:
2474 * if given, only leading N elements of X are used
2475 * if not given, automatically determined from size of X
2476 X - starting point, array[0..N-1].
2477 DiffStep- differentiation step, >0
2478
2479OUTPUT PARAMETERS:
2480 State - structure which stores algorithm state
2481
2482NOTES:
24831. algorithm uses 4-point central formula for differentiation.
24842. differentiation step along I-th axis is equal to DiffStep*S[I] where
2485 S[] is scaling vector which can be set by MinCGSetScale() call.
24863. we recommend you to use moderate values of differentiation step. Too
2487 large step will result in too large truncation errors, while too small
2488 step will result in too large numerical errors. 1.0E-6 can be good
2489 value to start with.
24904. Numerical differentiation is very inefficient - one gradient
2491 calculation needs 4*N function evaluations. This function will work for
2492 any N - either small (1...10), moderate (10...100) or large (100...).
2493 However, performance penalty will be too severe for any N's except for
2494 small ones.
2495 We should also say that code which relies on numerical differentiation
2496 is less robust and precise. L-BFGS needs exact gradient values.
2497 Imprecise gradient may slow down convergence, especially on highly
2498 nonlinear problems.
2499 Thus we recommend to use this function for fast prototyping on small-
2500 dimensional problems only, and to implement analytical gradient as soon
2501 as possible.
2502
2503 -- ALGLIB --
2504 Copyright 16.05.2011 by Bochkanov Sergey
2505*************************************************************************/
2506void mincgcreatef(const ae_int_t n, const real_1d_array &x, const double diffstep, mincgstate &state);
2507void mincgcreatef(const real_1d_array &x, const double diffstep, mincgstate &state);
2508
2509
2510/*************************************************************************
2511This function sets stopping conditions for CG optimization algorithm.
2512
2513INPUT PARAMETERS:
2514 State - structure which stores algorithm state
2515 EpsG - >=0
2516 The subroutine finishes its work if the condition
2517 |v|<EpsG is satisfied, where:
2518 * |.| means Euclidean norm
2519 * v - scaled gradient vector, v[i]=g[i]*s[i]
2520 * g - gradient
2521 * s - scaling coefficients set by MinCGSetScale()
2522 EpsF - >=0
2523 The subroutine finishes its work if on k+1-th iteration
2524 the condition |F(k+1)-F(k)|<=EpsF*max{|F(k)|,|F(k+1)|,1}
2525 is satisfied.
2526 EpsX - >=0
2527 The subroutine finishes its work if on k+1-th iteration
2528 the condition |v|<=EpsX is fulfilled, where:
2529 * |.| means Euclidean norm
2530 * v - scaled step vector, v[i]=dx[i]/s[i]
2531 * dx - step vector, dx=X(k+1)-X(k)
2532 * s - scaling coefficients set by MinCGSetScale()
2533 MaxIts - maximum number of iterations. If MaxIts=0, the number of
2534 iterations is unlimited.
2535
2536Passing EpsG=0, EpsF=0, EpsX=0 and MaxIts=0 (simultaneously) will lead to
2537automatic stopping criterion selection (small EpsX).
2538
2539 -- ALGLIB --
2540 Copyright 02.04.2010 by Bochkanov Sergey
2541*************************************************************************/
2542void mincgsetcond(const mincgstate &state, const double epsg, const double epsf, const double epsx, const ae_int_t maxits);
2543
2544
2545/*************************************************************************
2546This function sets scaling coefficients for CG optimizer.
2547
2548ALGLIB optimizers use scaling matrices to test stopping conditions (step
2549size and gradient are scaled before comparison with tolerances). Scale of
2550the I-th variable is a translation invariant measure of:
2551a) "how large" the variable is
2552b) how large the step should be to make significant changes in the function
2553
2554Scaling is also used by finite difference variant of CG optimizer - step
2555along I-th axis is equal to DiffStep*S[I].
2556
2557In most optimizers (and in the CG too) scaling is NOT a form of
2558preconditioning. It just affects stopping conditions. You should set
2559preconditioner by separate call to one of the MinCGSetPrec...() functions.
2560
2561There is special preconditioning mode, however, which uses scaling
2562coefficients to form diagonal preconditioning matrix. You can turn this
2563mode on, if you want. But you should understand that scaling is not the
2564same thing as preconditioning - these are two different, although related
2565forms of tuning solver.
2566
2567INPUT PARAMETERS:
2568 State - structure stores algorithm state
2569 S - array[N], non-zero scaling coefficients
2570 S[i] may be negative, sign doesn't matter.
2571
2572 -- ALGLIB --
2573 Copyright 14.01.2011 by Bochkanov Sergey
2574*************************************************************************/
2575void mincgsetscale(const mincgstate &state, const real_1d_array &s);
2576
2577
2578/*************************************************************************
2579This function turns on/off reporting.
2580
2581INPUT PARAMETERS:
2582 State - structure which stores algorithm state
2583 NeedXRep- whether iteration reports are needed or not
2584
2585If NeedXRep is True, algorithm will call rep() callback function if it is
2586provided to MinCGOptimize().
2587
2588 -- ALGLIB --
2589 Copyright 02.04.2010 by Bochkanov Sergey
2590*************************************************************************/
2591void mincgsetxrep(const mincgstate &state, const bool needxrep);
2592
2593
2594/*************************************************************************
2595This function sets CG algorithm.
2596
2597INPUT PARAMETERS:
2598 State - structure which stores algorithm state
2599 CGType - algorithm type:
2600 * -1 automatic selection of the best algorithm
2601 * 0 DY (Dai and Yuan) algorithm
2602 * 1 Hybrid DY-HS algorithm
2603
2604 -- ALGLIB --
2605 Copyright 02.04.2010 by Bochkanov Sergey
2606*************************************************************************/
2607void mincgsetcgtype(const mincgstate &state, const ae_int_t cgtype);
2608
2609
2610/*************************************************************************
2611This function sets maximum step length
2612
2613INPUT PARAMETERS:
2614 State - structure which stores algorithm state
2615 StpMax - maximum step length, >=0. Set StpMax to 0.0, if you don't
2616 want to limit step length.
2617
2618Use this subroutine when you optimize target function which contains exp()
2619or other fast growing functions, and optimization algorithm makes too
2620large steps which leads to overflow. This function allows us to reject
2621steps that are too large (and therefore expose us to the possible
2622overflow) without actually calculating function value at the x+stp*d.
2623
2624 -- ALGLIB --
2625 Copyright 02.04.2010 by Bochkanov Sergey
2626*************************************************************************/
2627void mincgsetstpmax(const mincgstate &state, const double stpmax);
2628
2629
2630/*************************************************************************
2631This function allows to suggest initial step length to the CG algorithm.
2632
2633Suggested step length is used as starting point for the line search. It
2634can be useful when you have badly scaled problem, i.e. when ||grad||
2635(which is used as initial estimate for the first step) is many orders of
2636magnitude different from the desired step.
2637
2638Line search may fail on such problems without good estimate of initial
2639step length. Imagine, for example, problem with ||grad||=10^50 and desired
2640step equal to 0.1. Line search function will use 10^50 as initial step,
2641then it will decrease step length by 2 (up to 20 attempts) and will get
264210^44, which is still too large.
2643
2644This function allows us to tell that line search should be started from
2645some moderate step length, like 1.0, so algorithm will be able to detect
2646desired step length in several searches.
2647
2648Default behavior (when no step is suggested) is to use preconditioner, if
2649it is available, to generate initial estimate of step length.
2650
2651This function influences only first iteration of algorithm. It should be
2652called between MinCGCreate/MinCGRestartFrom() call and MinCGOptimize call.
2653Suggested step is ignored if you have preconditioner.
2654
2655INPUT PARAMETERS:
2656 State - structure used to store algorithm state.
2657 Stp - initial estimate of the step length.
2658 Can be zero (no estimate).
2659
2660 -- ALGLIB --
2661 Copyright 30.07.2010 by Bochkanov Sergey
2662*************************************************************************/
2663void mincgsuggeststep(const mincgstate &state, const double stp);
2664
2665
2666/*************************************************************************
2667Modification of the preconditioner: preconditioning is turned off.
2668
2669INPUT PARAMETERS:
2670 State - structure which stores algorithm state
2671
2672NOTE: you can change preconditioner "on the fly", during algorithm
2673iterations.
2674
2675 -- ALGLIB --
2676 Copyright 13.10.2010 by Bochkanov Sergey
2677*************************************************************************/
2678void mincgsetprecdefault(const mincgstate &state);
2679
2680
2681/*************************************************************************
2682Modification of the preconditioner: diagonal of approximate Hessian is
2683used.
2684
2685INPUT PARAMETERS:
2686 State - structure which stores algorithm state
2687 D - diagonal of the approximate Hessian, array[0..N-1],
2688 (if larger, only leading N elements are used).
2689
2690NOTE: you can change preconditioner "on the fly", during algorithm
2691iterations.
2692
2693NOTE 2: D[i] should be positive. Exception will be thrown otherwise.
2694
2695NOTE 3: you should pass diagonal of approximate Hessian - NOT ITS INVERSE.
2696
2697 -- ALGLIB --
2698 Copyright 13.10.2010 by Bochkanov Sergey
2699*************************************************************************/
2700void mincgsetprecdiag(const mincgstate &state, const real_1d_array &d);
2701
2702
2703/*************************************************************************
2704Modification of the preconditioner: scale-based diagonal preconditioning.
2705
2706This preconditioning mode can be useful when you don't have approximate
2707diagonal of Hessian, but you know that your variables are badly scaled
2708(for example, one variable is in [1,10], and another in [1000,100000]),
2709and most part of the ill-conditioning comes from different scales of vars.
2710
2711In this case simple scale-based preconditioner, with H[i] = 1/(s[i]^2),
2712can greatly improve convergence.
2713
2714IMPORTANT: you should set scale of your variables with MinCGSetScale() call
2715(before or after MinCGSetPrecScale() call). Without knowledge of the scale
2716of your variables scale-based preconditioner will be just unit matrix.
2717
2718INPUT PARAMETERS:
2719 State - structure which stores algorithm state
2720
2721NOTE: you can change preconditioner "on the fly", during algorithm
2722iterations.
2723
2724 -- ALGLIB --
2725 Copyright 13.10.2010 by Bochkanov Sergey
2726*************************************************************************/
2727void mincgsetprecscale(const mincgstate &state);
2728
2729
2730/*************************************************************************
2731This function provides reverse communication interface
2732Reverse communication interface is not documented or recommended to use.
2733See below for functions which provide better documented API
2734*************************************************************************/
2735bool mincgiteration(const mincgstate &state);
2736
2737
2738/*************************************************************************
2739This family of functions is used to launch iterations of nonlinear optimizer
2740
2741These functions accept following parameters:
2742 state - algorithm state
2743 func - callback which calculates function (or merit function)
2744 value func at given point x
2745 grad - callback which calculates function (or merit function)
2746 value func and gradient grad at given point x
2747 rep - optional callback which is called after each iteration
2748 can be NULL
2749 ptr - optional pointer which is passed to func/grad/hess/jac/rep
2750 can be NULL
2751
2752NOTES:
2753
27541. This function has two different implementations: one which uses exact
2755 (analytical) user-supplied gradient, and one which uses function value
2756 only and numerically differentiates function in order to obtain
2757 gradient.
2758
2759 Depending on the specific function used to create optimizer object
2760 (either MinCGCreate() for analytical gradient or MinCGCreateF() for
2761 numerical differentiation) you should choose appropriate variant of
2762 MinCGOptimize() - one which accepts function AND gradient or one which
2763 accepts function ONLY.
2764
2765 Be careful to choose variant of MinCGOptimize() which corresponds to
2766 your optimization scheme! Table below lists different combinations of
2767 callback (function/gradient) passed to MinCGOptimize() and specific
2768 function used to create optimizer.
2769
2770
2771 | USER PASSED TO MinCGOptimize()
2772 CREATED WITH | function only | function and gradient
2773 ------------------------------------------------------------
2774 MinCGCreateF() | work FAIL
2775 MinCGCreate() | FAIL work
2776
2777 Here "FAIL" denotes inappropriate combinations of optimizer creation
2778 function and MinCGOptimize() version. Attempts to use such combination
2779 (for example, to create optimizer with MinCGCreateF() and to pass
2780 gradient information to MinCGOptimize()) will lead to exception being
2781 thrown. Either you did not pass gradient when it WAS needed or you
2782 passed gradient when it was NOT needed.
2783
2784 -- ALGLIB --
2785 Copyright 20.04.2009 by Bochkanov Sergey
2786
2787*************************************************************************/
2788void mincgoptimize(mincgstate &state,
2789 void (*func)(const real_1d_array &x, double &func, void *ptr),
2790 void (*rep)(const real_1d_array &x, double func, void *ptr) = NULL,
2791 void *ptr = NULL);
2792void mincgoptimize(mincgstate &state,
2793 void (*grad)(const real_1d_array &x, double &func, real_1d_array &grad, void *ptr),
2794 void (*rep)(const real_1d_array &x, double func, void *ptr) = NULL,
2795 void *ptr = NULL);
2796
2797
2798/*************************************************************************
2799Conjugate gradient results
2800
2801INPUT PARAMETERS:
2802 State - algorithm state
2803
2804OUTPUT PARAMETERS:
2805 X - array[0..N-1], solution
2806 Rep - optimization report:
2807 * Rep.TerminationType completion code:
2808 * -8 internal integrity control detected infinite
2809 or NAN values in function/gradient. Abnormal
2810 termination signalled.
2811 * -7 gradient verification failed.
2812 See MinCGSetGradientCheck() for more information.
2813 * 1 relative function improvement is no more than
2814 EpsF.
2815 * 2 relative step is no more than EpsX.
2816 * 4 gradient norm is no more than EpsG
2817 * 5 MaxIts steps was taken
2818 * 7 stopping conditions are too stringent,
2819 further improvement is impossible,
2820 we return best X found so far
2821 * 8 terminated by user
2822 * Rep.IterationsCount contains iterations count
2823 * NFEV contains number of function calculations
2824
2825 -- ALGLIB --
2826 Copyright 20.04.2009 by Bochkanov Sergey
2827*************************************************************************/
2828void mincgresults(const mincgstate &state, real_1d_array &x, mincgreport &rep);
2829
2830
2831/*************************************************************************
2832Conjugate gradient results
2833
2834Buffered implementation of MinCGResults(), which uses pre-allocated buffer
2835to store X[]. If buffer size is too small, it resizes buffer. It is
2836intended to be used in the inner cycles of performance critical algorithms
2837where array reallocation penalty is too large to be ignored.
2838
2839 -- ALGLIB --
2840 Copyright 20.04.2009 by Bochkanov Sergey
2841*************************************************************************/
2842void mincgresultsbuf(const mincgstate &state, real_1d_array &x, mincgreport &rep);
2843
2844
2845/*************************************************************************
2846This subroutine restarts CG algorithm from new point. All optimization
2847parameters are left unchanged.
2848
2849This function allows to solve multiple optimization problems (which
2850must have same number of dimensions) without object reallocation penalty.
2851
2852INPUT PARAMETERS:
2853 State - structure used to store algorithm state.
2854 X - new starting point.
2855
2856 -- ALGLIB --
2857 Copyright 30.07.2010 by Bochkanov Sergey
2858*************************************************************************/
2859void mincgrestartfrom(const mincgstate &state, const real_1d_array &x);
2860
2861
2862/*************************************************************************
2863This subroutine submits request for termination of running optimizer. It
2864should be called from user-supplied callback when user decides that it is
2865time to "smoothly" terminate optimization process. As result, optimizer
2866stops at point which was "current accepted" when termination request was
2867submitted and returns error code 8 (successful termination).
2868
2869INPUT PARAMETERS:
2870 State - optimizer structure
2871
2872NOTE: after request for termination optimizer may perform several
2873 additional calls to user-supplied callbacks. It does NOT guarantee
2874 to stop immediately - it just guarantees that these additional calls
2875 will be discarded later.
2876
2877NOTE: calling this function on optimizer which is NOT running will have no
2878 effect.
2879
2880NOTE: multiple calls to this function are possible. First call is counted,
2881 subsequent calls are silently ignored.
2882
2883 -- ALGLIB --
2884 Copyright 08.10.2014 by Bochkanov Sergey
2885*************************************************************************/
2886void mincgrequesttermination(const mincgstate &state);
2887
2888
2889/*************************************************************************
2890
2891This subroutine turns on verification of the user-supplied analytic
2892gradient:
2893* user calls this subroutine before optimization begins
2894* MinCGOptimize() is called
2895* prior to actual optimization, for each component of parameters being
2896 optimized X[i] algorithm performs following steps:
2897 * two trial steps are made to X[i]-TestStep*S[i] and X[i]+TestStep*S[i],
2898 where X[i] is i-th component of the initial point and S[i] is a scale
2899 of i-th parameter
2900 * F(X) is evaluated at these trial points
2901 * we perform one more evaluation in the middle point of the interval
2902 * we build cubic model using function values and derivatives at trial
2903 points and we compare its prediction with actual value in the middle
2904 point
2905 * in case difference between prediction and actual value is higher than
2906 some predetermined threshold, algorithm stops with completion code -7;
2907 Rep.VarIdx is set to index of the parameter with incorrect derivative.
2908* after verification is over, algorithm proceeds to the actual optimization.
2909
2910NOTE 1: verification needs N (parameters count) gradient evaluations. It
2911 is very costly and you should use it only for low dimensional
2912 problems, when you want to be sure that you've correctly
2913 calculated analytic derivatives. You should not use it in the
2914 production code (unless you want to check derivatives provided by
2915 some third party).
2916
2917NOTE 2: you should carefully choose TestStep. Value which is too large
2918 (so large that function behaviour is significantly non-cubic) will
2919 lead to false alarms. You may use different step for different
2920 parameters by means of setting scale with MinCGSetScale().
2921
2922NOTE 3: this function may lead to false positives. In case it reports that
2923 I-th derivative was calculated incorrectly, you may decrease test
2924 step and try one more time - maybe your function changes too
2925 sharply and your step is too large for such rapidly changing
2926 function.
2927
2928INPUT PARAMETERS:
2929 State - structure used to store algorithm state
2930 TestStep - verification step:
2931 * TestStep=0 turns verification off
2932 * TestStep>0 activates verification
2933
2934 -- ALGLIB --
2935 Copyright 31.05.2012 by Bochkanov Sergey
2936*************************************************************************/
2937void mincgsetgradientcheck(const mincgstate &state, const double teststep);
2938
2939/*************************************************************************
2940 BOUND CONSTRAINED OPTIMIZATION
2941 WITH ADDITIONAL LINEAR EQUALITY AND INEQUALITY CONSTRAINTS
2942
2943DESCRIPTION:
2944The subroutine minimizes function F(x) of N arguments subject to any
2945combination of:
2946* bound constraints
2947* linear inequality constraints
2948* linear equality constraints
2949
2950REQUIREMENTS:
2951* user must provide function value and gradient
2952* starting point X0 must be feasible or
2953 not too far away from the feasible set
2954* grad(f) must be Lipschitz continuous on a level set:
2955 L = { x : f(x)<=f(x0) }
2956* function must be defined everywhere on the feasible set F
2957
2958USAGE:
2959
2960Constrained optimization is far more complex than the unconstrained one.
2961Here we give very brief outline of the BLEIC optimizer. We strongly recommend
2962you to read examples in the ALGLIB Reference Manual and to read ALGLIB User Guide
2963on optimization, which is available at http://www.alglib.net/optimization/
2964
29651. User initializes algorithm state with MinBLEICCreate() call
2966
29672. User adds boundary and/or linear constraints by calling
2968 MinBLEICSetBC() and MinBLEICSetLC() functions.
2969
29703. User sets stopping conditions with MinBLEICSetCond().
2971
29724. User calls MinBLEICOptimize() function which takes algorithm state and
2973 pointer (delegate, etc.) to callback function which calculates F/G.
2974
29755. User calls MinBLEICResults() to get solution
2976
29776. Optionally user may call MinBLEICRestartFrom() to solve another problem
2978 with same N but another starting point.
2979 MinBLEICRestartFrom() allows to reuse already initialized structure.
2980
2981NOTE: if you have box-only constraints (no general linear constraints),
2982 then MinBC optimizer can be better option. It uses special, faster
2983 constraint activation method, which performs better on problems with
2984 multiple constraints active at the solution.
2985
2986 On small-scale problems performance of MinBC is similar to that of
2987 MinBLEIC, but on large-scale ones (hundreds and thousands of active
2988 constraints) it can be several times faster than MinBLEIC.
2989
2990INPUT PARAMETERS:
2991 N - problem dimension, N>0:
2992 * if given, only leading N elements of X are used
2992 * if not given, automatically determined from size of X
2994 X - starting point, array[N]:
2995 * it is better to set X to a feasible point
2996 * but X can be infeasible, in which case algorithm will try
2997 to find feasible point first, using X as initial
2998 approximation.
2999
3000OUTPUT PARAMETERS:
3001 State - structure stores algorithm state
3002
3003 -- ALGLIB --
3004 Copyright 28.11.2010 by Bochkanov Sergey
3005*************************************************************************/
3006void minbleiccreate(const ae_int_t n, const real_1d_array &x, minbleicstate &state);
3007void minbleiccreate(const real_1d_array &x, minbleicstate &state);
3008
3009
3010/*************************************************************************
3011The subroutine is finite difference variant of MinBLEICCreate(). It uses
3012finite differences in order to differentiate target function.
3013
3014Description below contains information which is specific to this function
3015only. We recommend to read comments on MinBLEICCreate() in order to get
3016more information about creation of BLEIC optimizer.
3017
3018INPUT PARAMETERS:
3019 N - problem dimension, N>0:
3020 * if given, only leading N elements of X are used
3021 * if not given, automatically determined from size of X
3022 X - starting point, array[0..N-1].
3023 DiffStep- differentiation step, >0
3024
3025OUTPUT PARAMETERS:
3026 State - structure which stores algorithm state
3027
3028NOTES:
30291. algorithm uses 4-point central formula for differentiation.
30302. differentiation step along I-th axis is equal to DiffStep*S[I] where
3031 S[] is scaling vector which can be set by MinBLEICSetScale() call.
30323. we recommend you to use moderate values of differentiation step. Too
3033 large step will result in too large truncation errors, while too small
3034 step will result in too large numerical errors. 1.0E-6 can be good
3035 value to start with.
30364. Numerical differentiation is very inefficient - one gradient
3037 calculation needs 4*N function evaluations. This function will work for
3038 any N - either small (1...10), moderate (10...100) or large (100...).
3039 However, performance penalty will be too severe for any N's except for
3040 small ones.
3041 We should also say that code which relies on numerical differentiation
3042 is less robust and precise. CG needs exact gradient values. Imprecise
3043 gradient may slow down convergence, especially on highly nonlinear
3044 problems.
3045 Thus we recommend to use this function for fast prototyping on small-
3046 dimensional problems only, and to implement analytical gradient as soon
3047 as possible.
3048
3049 -- ALGLIB --
3050 Copyright 16.05.2011 by Bochkanov Sergey
3051*************************************************************************/
3052void minbleiccreatef(const ae_int_t n, const real_1d_array &x, const double diffstep, minbleicstate &state);
3053void minbleiccreatef(const real_1d_array &x, const double diffstep, minbleicstate &state);
3054
3055
3056/*************************************************************************
3057This function sets boundary constraints for BLEIC optimizer.
3058
3059Boundary constraints are inactive by default (after initial creation).
3060They are preserved after algorithm restart with MinBLEICRestartFrom().
3061
3062NOTE: if you have box-only constraints (no general linear constraints),
3063 then MinBC optimizer can be better option. It uses special, faster
3064 constraint activation method, which performs better on problems with
3065 multiple constraints active at the solution.
3066
3067 On small-scale problems performance of MinBC is similar to that of
3068 MinBLEIC, but on large-scale ones (hundreds and thousands of active
3069 constraints) it can be several times faster than MinBLEIC.
3070
3071INPUT PARAMETERS:
3072 State - structure stores algorithm state
3073 BndL - lower bounds, array[N].
3074 If some (all) variables are unbounded, you may specify
3075 very small number or -INF.
3076 BndU - upper bounds, array[N].
3077 If some (all) variables are unbounded, you may specify
3078 very large number or +INF.
3079
3080NOTE 1: it is possible to specify BndL[i]=BndU[i]. In this case I-th
3081variable will be "frozen" at X[i]=BndL[i]=BndU[i].
3082
3083NOTE 2: this solver has following useful properties:
3084* bound constraints are always satisfied exactly
3085* function is evaluated only INSIDE area specified by bound constraints,
3086 even when numerical differentiation is used (algorithm adjusts nodes
3087 according to boundary constraints)
3088
3089 -- ALGLIB --
3090 Copyright 28.11.2010 by Bochkanov Sergey
3091*************************************************************************/
3092void minbleicsetbc(const minbleicstate &state, const real_1d_array &bndl, const real_1d_array &bndu);
3093
3094
3095/*************************************************************************
3096This function sets linear constraints for BLEIC optimizer.
3097
3098Linear constraints are inactive by default (after initial creation).
3099They are preserved after algorithm restart with MinBLEICRestartFrom().
3100
3101INPUT PARAMETERS:
3102 State - structure previously allocated with MinBLEICCreate call.
3103 C - linear constraints, array[K,N+1].
3104 Each row of C represents one constraint, either equality
3105 or inequality (see below):
3106 * first N elements correspond to coefficients,
3107 * last element corresponds to the right part.
3108 All elements of C (including right part) must be finite.
3109 CT - type of constraints, array[K]:
3110 * if CT[i]>0, then I-th constraint is C[i,*]*x >= C[i,n+1]
3111 * if CT[i]=0, then I-th constraint is C[i,*]*x = C[i,n+1]
3112 * if CT[i]<0, then I-th constraint is C[i,*]*x <= C[i,n+1]
3113 K - number of equality/inequality constraints, K>=0:
3114 * if given, only leading K elements of C/CT are used
3115 * if not given, automatically determined from sizes of C/CT
3116
3117NOTE 1: linear (non-bound) constraints are satisfied only approximately:
3118* there always exists some minor violation (about Epsilon in magnitude)
3119 due to rounding errors
3120* numerical differentiation, if used, may lead to function evaluations
3121 outside of the feasible area, because algorithm does NOT change
3122 numerical differentiation formula according to linear constraints.
3123If you want constraints to be satisfied exactly, try to reformulate your
3124problem in such manner that all constraints will become boundary ones
3125(this kind of constraints is always satisfied exactly, both in the final
3126solution and in all intermediate points).
3127
3128 -- ALGLIB --
3129 Copyright 28.11.2010 by Bochkanov Sergey
3130*************************************************************************/
3131void minbleicsetlc(const minbleicstate &state, const real_2d_array &c, const integer_1d_array &ct, const ae_int_t k);
3132void minbleicsetlc(const minbleicstate &state, const real_2d_array &c, const integer_1d_array &ct);
3133
3134
3135/*************************************************************************
3136This function sets stopping conditions for the optimizer.
3137
3138INPUT PARAMETERS:
3139 State - structure which stores algorithm state
3140 EpsG - >=0
3141 The subroutine finishes its work if the condition
3142 |v|<EpsG is satisfied, where:
3143 * |.| means Euclidian norm
3144 * v - scaled gradient vector, v[i]=g[i]*s[i]
3145 * g - gradient
3146 * s - scaling coefficients set by MinBLEICSetScale()
3147 EpsF - >=0
3148 The subroutine finishes its work if on k+1-th iteration
3149 the condition |F(k+1)-F(k)|<=EpsF*max{|F(k)|,|F(k+1)|,1}
3150 is satisfied.
3151 EpsX - >=0
3152 The subroutine finishes its work if on k+1-th iteration
3153 the condition |v|<=EpsX is fulfilled, where:
3154 * |.| means Euclidian norm
3155 * v - scaled step vector, v[i]=dx[i]/s[i]
3156 * dx - step vector, dx=X(k+1)-X(k)
3157 * s - scaling coefficients set by MinBLEICSetScale()
3158 MaxIts - maximum number of iterations. If MaxIts=0, the number of
3159 iterations is unlimited.
3160
3161Passing EpsG=0, EpsF=0 and EpsX=0 and MaxIts=0 (simultaneously) will lead
3162to automatic stopping criterion selection.
3163
3164NOTE: when SetCond() called with non-zero MaxIts, BLEIC solver may perform
3165 slightly more than MaxIts iterations. I.e., MaxIts sets non-strict
3166 limit on iterations count.
3167
3168 -- ALGLIB --
3169 Copyright 28.11.2010 by Bochkanov Sergey
3170*************************************************************************/
3171void minbleicsetcond(const minbleicstate &state, const double epsg, const double epsf, const double epsx, const ae_int_t maxits);
3172
3173
3174/*************************************************************************
3175This function sets scaling coefficients for BLEIC optimizer.
3176
3177ALGLIB optimizers use scaling matrices to test stopping conditions (step
3178size and gradient are scaled before comparison with tolerances). Scale of
3179the I-th variable is a translation invariant measure of:
3180a) "how large" the variable is
3181b) how large the step should be to make significant changes in the function
3182
3183Scaling is also used by finite difference variant of the optimizer - step
3184along I-th axis is equal to DiffStep*S[I].
3185
3186In most optimizers (and in the BLEIC too) scaling is NOT a form of
3187preconditioning. It just affects stopping conditions. You should set
3188preconditioner by separate call to one of the MinBLEICSetPrec...()
3189functions.
3190
3191There is a special preconditioning mode, however, which uses scaling
3192coefficients to form diagonal preconditioning matrix. You can turn this
3193mode on, if you want. But you should understand that scaling is not the
3194same thing as preconditioning - these are two different, although related
3195forms of tuning solver.
3196
3197INPUT PARAMETERS:
3198 State - structure stores algorithm state
3199 S - array[N], non-zero scaling coefficients
3200 S[i] may be negative, sign doesn't matter.
3201
3202 -- ALGLIB --
3203 Copyright 14.01.2011 by Bochkanov Sergey
3204*************************************************************************/
3205void minbleicsetscale(const minbleicstate &state, const real_1d_array &s);
3206
3207
3208/*************************************************************************
3209Modification of the preconditioner: preconditioning is turned off.
3210
3211INPUT PARAMETERS:
3212 State - structure which stores algorithm state
3213
3214 -- ALGLIB --
3215 Copyright 13.10.2010 by Bochkanov Sergey
3216*************************************************************************/
3217void minbleicsetprecdefault(const minbleicstate &state);
3218
3219
3220/*************************************************************************
3221Modification of the preconditioner: diagonal of approximate Hessian is
3222used.
3223
3224INPUT PARAMETERS:
3225 State - structure which stores algorithm state
3226 D - diagonal of the approximate Hessian, array[0..N-1],
3227 (if larger, only leading N elements are used).
3228
3229NOTE 1: D[i] should be positive. Exception will be thrown otherwise.
3230
3231NOTE 2: you should pass diagonal of approximate Hessian - NOT ITS INVERSE.
3232
3233 -- ALGLIB --
3234 Copyright 13.10.2010 by Bochkanov Sergey
3235*************************************************************************/
3236void minbleicsetprecdiag(const minbleicstate &state, const real_1d_array &d);
3237
3238
3239/*************************************************************************
3240Modification of the preconditioner: scale-based diagonal preconditioning.
3241
3242This preconditioning mode can be useful when you don't have approximate
3243diagonal of Hessian, but you know that your variables are badly scaled
3244(for example, one variable is in [1,10], and another in [1000,100000]),
3245and most part of the ill-conditioning comes from different scales of vars.
3246
3247In this case simple scale-based preconditioner, with H[i] = 1/(s[i]^2),
3248can greatly improve convergence.
3249
3250IMPRTANT: you should set scale of your variables with MinBLEICSetScale()
3251call (before or after MinBLEICSetPrecScale() call). Without knowledge of
3252the scale of your variables scale-based preconditioner will be just unit
3253matrix.
3254
3255INPUT PARAMETERS:
3256 State - structure which stores algorithm state
3257
3258 -- ALGLIB --
3259 Copyright 13.10.2010 by Bochkanov Sergey
3260*************************************************************************/
3261void minbleicsetprecscale(const minbleicstate &state);
3262
3263
3264/*************************************************************************
3265This function turns on/off reporting.
3266
3267INPUT PARAMETERS:
3268 State - structure which stores algorithm state
3269 NeedXRep- whether iteration reports are needed or not
3270
3271If NeedXRep is True, algorithm will call rep() callback function if it is
3272provided to MinBLEICOptimize().
3273
3274 -- ALGLIB --
3275 Copyright 28.11.2010 by Bochkanov Sergey
3276*************************************************************************/
3277void minbleicsetxrep(const minbleicstate &state, const bool needxrep);
3278
3279
3280/*************************************************************************
3281This function sets maximum step length
3282
3283IMPORTANT: this feature is hard to combine with preconditioning. You can't
3284set upper limit on step length, when you solve optimization problem with
3285linear (non-boundary) constraints AND preconditioner turned on.
3286
3287When non-boundary constraints are present, you have to either a) use
3288preconditioner, or b) use upper limit on step length. YOU CAN'T USE BOTH!
3289In this case algorithm will terminate with appropriate error code.
3290
3291INPUT PARAMETERS:
3292 State - structure which stores algorithm state
3293 StpMax - maximum step length, >=0. Set StpMax to 0.0, if you don't
3294 want to limit step length.
3295
3296Use this subroutine when you optimize target function which contains exp()
3297or other fast growing functions, and optimization algorithm makes too
3298large steps which lead to overflow. This function allows us to reject
3299steps that are too large (and therefore expose us to the possible
3300overflow) without actually calculating function value at the x+stp*d.
3301
3302 -- ALGLIB --
3303 Copyright 02.04.2010 by Bochkanov Sergey
3304*************************************************************************/
3305void minbleicsetstpmax(const minbleicstate &state, const double stpmax);
3306
3307
3308/*************************************************************************
3309This function provides reverse communication interface
3310Reverse communication interface is not documented or recommended to use.
3311See below for functions which provide better documented API
3312*************************************************************************/
3313bool minbleiciteration(const minbleicstate &state);
3314
3315
3316/*************************************************************************
3317This family of functions is used to launcn iterations of nonlinear optimizer
3318
3319These functions accept following parameters:
3320 state - algorithm state
3321 func - callback which calculates function (or merit function)
3322 value func at given point x
3323 grad - callback which calculates function (or merit function)
3324 value func and gradient grad at given point x
3325 rep - optional callback which is called after each iteration
3326 can be NULL
3327 ptr - optional pointer which is passed to func/grad/hess/jac/rep
3328 can be NULL
3329
3330NOTES:
3331
33321. This function has two different implementations: one which uses exact
3333 (analytical) user-supplied gradient, and one which uses function value
3334 only and numerically differentiates function in order to obtain
3335 gradient.
3336
3337 Depending on the specific function used to create optimizer object
3338 (either MinBLEICCreate() for analytical gradient or MinBLEICCreateF()
3339 for numerical differentiation) you should choose appropriate variant of
3340 MinBLEICOptimize() - one which accepts function AND gradient or one
3341 which accepts function ONLY.
3342
3343 Be careful to choose variant of MinBLEICOptimize() which corresponds to
3344 your optimization scheme! Table below lists different combinations of
3345 callback (function/gradient) passed to MinBLEICOptimize() and specific
3346 function used to create optimizer.
3347
3348
3349 | USER PASSED TO MinBLEICOptimize()
3350 CREATED WITH | function only | function and gradient
3351 ------------------------------------------------------------
3352 MinBLEICCreateF() | work FAIL
3353 MinBLEICCreate() | FAIL work
3354
3355 Here "FAIL" denotes inappropriate combinations of optimizer creation
3356 function and MinBLEICOptimize() version. Attemps to use such
3357 combination (for example, to create optimizer with MinBLEICCreateF()
3358 and to pass gradient information to MinCGOptimize()) will lead to
3359 exception being thrown. Either you did not pass gradient when it WAS
3360 needed or you passed gradient when it was NOT needed.
3361
3362 -- ALGLIB --
3363 Copyright 28.11.2010 by Bochkanov Sergey
3364
3365*************************************************************************/
3366void minbleicoptimize(minbleicstate &state,
3367 void (*func)(const real_1d_array &x, double &func, void *ptr),
3368 void (*rep)(const real_1d_array &x, double func, void *ptr) = NULL,
3369 void *ptr = NULL);
3370void minbleicoptimize(minbleicstate &state,
3371 void (*grad)(const real_1d_array &x, double &func, real_1d_array &grad, void *ptr),
3372 void (*rep)(const real_1d_array &x, double func, void *ptr) = NULL,
3373 void *ptr = NULL);
3374
3375
3376/*************************************************************************
3377BLEIC results
3378
3379INPUT PARAMETERS:
3380 State - algorithm state
3381
3382OUTPUT PARAMETERS:
3383 X - array[0..N-1], solution
3384 Rep - optimization report. You should check Rep.TerminationType
3385 in order to distinguish successful termination from
3386 unsuccessful one:
3387 * -8 internal integrity control detected infinite or
3388 NAN values in function/gradient. Abnormal
3389 termination signalled.
3390 * -7 gradient verification failed.
3391 See MinBLEICSetGradientCheck() for more information.
3392 * -3 inconsistent constraints. Feasible point is
3393 either nonexistent or too hard to find. Try to
3394 restart optimizer with better initial approximation
3395 * 1 relative function improvement is no more than EpsF.
3396 * 2 scaled step is no more than EpsX.
3397 * 4 scaled gradient norm is no more than EpsG.
3398 * 5 MaxIts steps was taken
3399 * 8 terminated by user who called minbleicrequesttermination().
3400 X contains point which was "current accepted" when
3401 termination request was submitted.
3402 More information about fields of this structure can be
3403 found in the comments on MinBLEICReport datatype.
3404
3405 -- ALGLIB --
3406 Copyright 28.11.2010 by Bochkanov Sergey
3407*************************************************************************/
3408void minbleicresults(const minbleicstate &state, real_1d_array &x, minbleicreport &rep);
3409
3410
3411/*************************************************************************
3412BLEIC results
3413
3414Buffered implementation of MinBLEICResults() which uses pre-allocated buffer
3415to store X[]. If buffer size is too small, it resizes buffer. It is
3416intended to be used in the inner cycles of performance critical algorithms
3417where array reallocation penalty is too large to be ignored.
3418
3419 -- ALGLIB --
3420 Copyright 28.11.2010 by Bochkanov Sergey
3421*************************************************************************/
3422void minbleicresultsbuf(const minbleicstate &state, real_1d_array &x, minbleicreport &rep);
3423
3424
3425/*************************************************************************
3426This subroutine restarts algorithm from new point.
3427All optimization parameters (including constraints) are left unchanged.
3428
3429This function allows to solve multiple optimization problems (which
3430must have same number of dimensions) without object reallocation penalty.
3431
3432INPUT PARAMETERS:
3433 State - structure previously allocated with MinBLEICCreate call.
3434 X - new starting point.
3435
3436 -- ALGLIB --
3437 Copyright 28.11.2010 by Bochkanov Sergey
3438*************************************************************************/
3439void minbleicrestartfrom(const minbleicstate &state, const real_1d_array &x);
3440
3441
3442/*************************************************************************
3443This subroutine submits request for termination of running optimizer. It
3444should be called from user-supplied callback when user decides that it is
3445time to "smoothly" terminate optimization process. As result, optimizer
3446stops at point which was "current accepted" when termination request was
3447submitted and returns error code 8 (successful termination).
3448
3449INPUT PARAMETERS:
3450 State - optimizer structure
3451
3452NOTE: after request for termination optimizer may perform several
3453 additional calls to user-supplied callbacks. It does NOT guarantee
3454 to stop immediately - it just guarantees that these additional calls
3455 will be discarded later.
3456
3457NOTE: calling this function on optimizer which is NOT running will have no
3458 effect.
3459
3460NOTE: multiple calls to this function are possible. First call is counted,
3461 subsequent calls are silently ignored.
3462
3463 -- ALGLIB --
3464 Copyright 08.10.2014 by Bochkanov Sergey
3465*************************************************************************/
3466void minbleicrequesttermination(const minbleicstate &state);
3467
3468
3469/*************************************************************************
3470This subroutine turns on verification of the user-supplied analytic
3471gradient:
3472* user calls this subroutine before optimization begins
3473* MinBLEICOptimize() is called
3474* prior to actual optimization, for each component of parameters being
3475 optimized X[i] algorithm performs following steps:
3476 * two trial steps are made to X[i]-TestStep*S[i] and X[i]+TestStep*S[i],
3477 where X[i] is i-th component of the initial point and S[i] is a scale
3478 of i-th parameter
3479 * if needed, steps are bounded with respect to constraints on X[]
3480 * F(X) is evaluated at these trial points
3481 * we perform one more evaluation in the middle point of the interval
3482 * we build cubic model using function values and derivatives at trial
3483 points and we compare its prediction with actual value in the middle
3484 point
3485 * in case difference between prediction and actual value is higher than
3486 some predetermined threshold, algorithm stops with completion code -7;
3487 Rep.VarIdx is set to index of the parameter with incorrect derivative.
3488* after verification is over, algorithm proceeds to the actual optimization.
3489
3490NOTE 1: verification needs N (parameters count) gradient evaluations. It
3491 is very costly and you should use it only for low dimensional
3492 problems, when you want to be sure that you've correctly
3493 calculated analytic derivatives. You should not use it in the
3494 production code (unless you want to check derivatives provided by
3495 some third party).
3496
3497NOTE 2: you should carefully choose TestStep. Value which is too large
3498 (so large that function behaviour is significantly non-cubic) will
3499 lead to false alarms. You may use different step for different
3500 parameters by means of setting scale with MinBLEICSetScale().
3501
3502NOTE 3: this function may lead to false positives. In case it reports that
3503 I-th derivative was calculated incorrectly, you may decrease test
3504 step and try one more time - maybe your function changes too
3505 sharply and your step is too large for such rapidly chanding
3506 function.
3507
3508INPUT PARAMETERS:
3509 State - structure used to store algorithm state
3510 TestStep - verification step:
3511 * TestStep=0 turns verification off
3512 * TestStep>0 activates verification
3513
3514 -- ALGLIB --
3515 Copyright 15.06.2012 by Bochkanov Sergey
3516*************************************************************************/
3517void minbleicsetgradientcheck(const minbleicstate &state, const double teststep);
3518
3519
3520
3521/*************************************************************************
3522 CONSTRAINED QUADRATIC PROGRAMMING
3523
3524The subroutine creates QP optimizer. After initial creation, it contains
3525default optimization problem with zero quadratic and linear terms and no
3526constraints. You should set quadratic/linear terms with calls to functions
3527provided by MinQP subpackage.
3528
3529You should also choose appropriate QP solver and set it and its stopping
3530criteria by means of MinQPSetAlgo??????() function. Then, you should start
3531solution process by means of MinQPOptimize() call. Solution itself can be
3532obtained with MinQPResults() function.
3533
3534Following solvers are recommended:
3535* QuickQP for dense problems with box-only constraints (or no constraints
3536 at all)
3537* QP-BLEIC for dense/sparse problems with moderate (up to 50) number of
3538 general linear constraints
3539* DENSE-AUL-QP for dense problems with any (small or large) number of
3540 general linear constraints
3541
3542INPUT PARAMETERS:
3543 N - problem size
3544
3545OUTPUT PARAMETERS:
3546 State - optimizer with zero quadratic/linear terms
3547 and no constraints
3548
3549 -- ALGLIB --
3550 Copyright 11.01.2011 by Bochkanov Sergey
3551*************************************************************************/
3552void minqpcreate(const ae_int_t n, minqpstate &state);
3553
3554
3555/*************************************************************************
3556This function sets linear term for QP solver.
3557
3558By default, linear term is zero.
3559
3560INPUT PARAMETERS:
3561 State - structure which stores algorithm state
3562 B - linear term, array[N].
3563
3564 -- ALGLIB --
3565 Copyright 11.01.2011 by Bochkanov Sergey
3566*************************************************************************/
3567void minqpsetlinearterm(const minqpstate &state, const real_1d_array &b);
3568
3569
3570/*************************************************************************
3571This function sets dense quadratic term for QP solver. By default,
3572quadratic term is zero.
3573
3574SUPPORT BY QP SOLVERS:
3575
3576Dense quadratic term can be handled by following QP solvers:
3577* QuickQP
3578* BLEIC-QP
3579* Dense-AUL-QP
3580
3581IMPORTANT:
3582
3583This solver minimizes following function:
3584 f(x) = 0.5*x'*A*x + b'*x.
3585Note that quadratic term has 0.5 before it. So if you want to minimize
3586 f(x) = x^2 + x
3587you should rewrite your problem as follows:
3588 f(x) = 0.5*(2*x^2) + x
3589and your matrix A will be equal to [[2.0]], not to [[1.0]]
3590
3591INPUT PARAMETERS:
3592 State - structure which stores algorithm state
3593 A - matrix, array[N,N]
3594 IsUpper - (optional) storage type:
3595 * if True, symmetric matrix A is given by its upper
3596 triangle, and the lower triangle isn't used
3597 * if False, symmetric matrix A is given by its lower
3598 triangle, and the upper triangle isn't used
3599 * if not given, both lower and upper triangles must be
3600 filled.
3601
3602 -- ALGLIB --
3603 Copyright 11.01.2011 by Bochkanov Sergey
3604*************************************************************************/
3605void minqpsetquadraticterm(const minqpstate &state, const real_2d_array &a, const bool isupper);
3606void minqpsetquadraticterm(const minqpstate &state, const real_2d_array &a);
3607
3608
3609/*************************************************************************
3610This function sets sparse quadratic term for QP solver. By default,
3611quadratic term is zero. This function overrides previous calls to
3612minqpsetquadraticterm() or minqpsetquadratictermsparse().
3613
3614SUPPORT BY QP SOLVERS:
3615
3616Sparse quadratic term can be handled by following QP solvers:
3617* QuickQP
3618* BLEIC-QP
3619* Dense-AUL-QP (internally converts sparse matrix to dense format)
3620
3621IMPORTANT:
3622
3623This solver minimizes following function:
3624 f(x) = 0.5*x'*A*x + b'*x.
3625Note that quadratic term has 0.5 before it. So if you want to minimize
3626 f(x) = x^2 + x
3627you should rewrite your problem as follows:
3628 f(x) = 0.5*(2*x^2) + x
3629and your matrix A will be equal to [[2.0]], not to [[1.0]]
3630
3631INPUT PARAMETERS:
3632 State - structure which stores algorithm state
3633 A - matrix, array[N,N]
3634 IsUpper - (optional) storage type:
3635 * if True, symmetric matrix A is given by its upper
3636 triangle, and the lower triangle isn't used
3637 * if False, symmetric matrix A is given by its lower
3638 triangle, and the upper triangle isn't used
3639 * if not given, both lower and upper triangles must be
3640 filled.
3641
3642 -- ALGLIB --
3643 Copyright 11.01.2011 by Bochkanov Sergey
3644*************************************************************************/
3645void minqpsetquadratictermsparse(const minqpstate &state, const sparsematrix &a, const bool isupper);
3646
3647
3648/*************************************************************************
3649This function sets starting point for QP solver. It is useful to have
3650good initial approximation to the solution, because it will increase
3651speed of convergence and identification of active constraints.
3652
3653INPUT PARAMETERS:
3654 State - structure which stores algorithm state
3655 X - starting point, array[N].
3656
3657 -- ALGLIB --
3658 Copyright 11.01.2011 by Bochkanov Sergey
3659*************************************************************************/
3660void minqpsetstartingpoint(const minqpstate &state, const real_1d_array &x);
3661
3662
3663/*************************************************************************
3664This function sets origin for QP solver. By default, following QP program
3665is solved:
3666
3667 min(0.5*x'*A*x+b'*x)
3668
3669This function allows to solve different problem:
3670
3671 min(0.5*(x-x_origin)'*A*(x-x_origin)+b'*(x-x_origin))
3672
3673Specification of non-zero origin affects function being minimized, but not
3674constraints. Box and linear constraints are still calculated without
3675origin.
3676
3677INPUT PARAMETERS:
3678 State - structure which stores algorithm state
3679 XOrigin - origin, array[N].
3680
3681 -- ALGLIB --
3682 Copyright 11.01.2011 by Bochkanov Sergey
3683*************************************************************************/
3684void minqpsetorigin(const minqpstate &state, const real_1d_array &xorigin);
3685
3686
3687/*************************************************************************
3688This function sets scaling coefficients.
3689
3690ALGLIB optimizers use scaling matrices to test stopping conditions (step
3691size and gradient are scaled before comparison with tolerances) and as
3692preconditioner.
3693
3694Scale of the I-th variable is a translation invariant measure of:
3695a) "how large" the variable is
3696b) how large the step should be to make significant changes in the function
3697
3698INPUT PARAMETERS:
3699 State - structure stores algorithm state
3700 S - array[N], non-zero scaling coefficients
3701 S[i] may be negative, sign doesn't matter.
3702
3703 -- ALGLIB --
3704 Copyright 14.01.2011 by Bochkanov Sergey
3705*************************************************************************/
3706void minqpsetscale(const minqpstate &state, const real_1d_array &s);
3707
3708
3709/*************************************************************************
3710DEPRECATED DEPRECATED DEPRECATED DEPRECATED DEPRECATED DEPRECATED
3711
3712
3713This function tells solver to use Cholesky-based algorithm. This algorithm
3714was deprecated in ALGLIB 3.9.0 because its performance is inferior to that
3715of BLEIC-QP or QuickQP on high-dimensional problems. Furthermore, it
3716supports only dense convex QP problems.
3717
3718This solver is no longer active by default.
3719
3720We recommend you to switch to AUL-QP, BLEIC-QP or QuickQP solver.
3721
3722
3723DEPRECATED DEPRECATED DEPRECATED DEPRECATED DEPRECATED DEPRECATED
3724
3725 -- ALGLIB --
3726 Copyright 11.01.2011 by Bochkanov Sergey
3727*************************************************************************/
3728void minqpsetalgocholesky(const minqpstate &state);
3729
3730
3731/*************************************************************************
3732This function tells solver to use BLEIC-based algorithm and sets stopping
3733criteria for the algorithm.
3734
3735ALGORITHM FEATURES:
3736
3737* supports dense and sparse QP problems
3738* supports box and general linear equality/inequality constraints
3739* can solve all types of problems (convex, semidefinite, nonconvex) as
3740 long as they are bounded from below under constraints.
3741 Say, it is possible to solve "min{-x^2} subject to -1<=x<=+1".
3742 Of course, global minimum is found only for positive definite and
3743 semidefinite problems. As for indefinite ones - only local minimum is
3744 found.
3745
3746ALGORITHM OUTLINE:
3747
3748* BLEIC-QP solver is just a driver function for MinBLEIC solver; it solves
3749 quadratic programming problem as general linearly constrained
3750 optimization problem, which is solved by means of BLEIC solver (part of
3751 ALGLIB, active set method).
3752
3753ALGORITHM LIMITATIONS:
3754
3755* this algorithm is fast enough for large-scale problems with small amount
3756 of general linear constraints (say, up to 50), but it is inefficient
3757 for problems with several hundreds of constraints. Iteration cost is
3758 roughly quadratic w.r.t. constraint count.
3759 Furthermore, it can not efficiently handle sparse constraints (they are
3760 converted to dense format prior to solution).
3761 Thus, if you have large and/or sparse constraint matrix and convex QP
3762 problem, Dense-AUL-QP solver may be better solution.
3763* unlike QuickQP solver, this algorithm does not perform Newton steps and
3764 does not use Level 3 BLAS. Being general-purpose active set method, it
3765 can activate constraints only one-by-one. Thus, its performance is lower
3766 than that of QuickQP.
3767* its precision is also a bit inferior to that of QuickQP. BLEIC-QP
3768 performs only LBFGS steps (no Newton steps), which are good at detecting
3769   neighborhood of the solution, but needs many iterations to find solution
3770 with more than 6 digits of precision.
3771
3772INPUT PARAMETERS:
3773 State - structure which stores algorithm state
3774 EpsG - >=0
3775 The subroutine finishes its work if the condition
3776 |v|<EpsG is satisfied, where:
3777                        * |.| means Euclidean norm
3778 * v - scaled constrained gradient vector, v[i]=g[i]*s[i]
3779 * g - gradient
3780 * s - scaling coefficients set by MinQPSetScale()
3781 EpsF - >=0
3782 The subroutine finishes its work if exploratory steepest
3783 descent step on k+1-th iteration satisfies following
3784 condition: |F(k+1)-F(k)|<=EpsF*max{|F(k)|,|F(k+1)|,1}
3785 EpsX - >=0
3786 The subroutine finishes its work if exploratory steepest
3787 descent step on k+1-th iteration satisfies following
3788                        condition |v|<=EpsX is satisfied, where:
3789                        * |.| means Euclidean norm
3790 * v - scaled step vector, v[i]=dx[i]/s[i]
3791 * dx - step vector, dx=X(k+1)-X(k)
3792 * s - scaling coefficients set by MinQPSetScale()
3793 MaxIts - maximum number of iterations. If MaxIts=0, the number of
3794 iterations is unlimited. NOTE: this algorithm uses LBFGS
3795 iterations, which are relatively cheap, but improve
3796 function value only a bit. So you will need many iterations
3797 to converge - from 0.1*N to 10*N, depending on problem's
3798 condition number.
3799
3800IT IS VERY IMPORTANT TO CALL MinQPSetScale() WHEN YOU USE THIS ALGORITHM
3801BECAUSE ITS STOPPING CRITERIA ARE SCALE-DEPENDENT!
3802
3803Passing EpsG=0, EpsF=0 and EpsX=0 and MaxIts=0 (simultaneously) will lead
3804to automatic stopping criterion selection (presently it is small step
3805length, but it may change in the future versions of ALGLIB).
3806
3807 -- ALGLIB --
3808 Copyright 11.01.2011 by Bochkanov Sergey
3809*************************************************************************/
3810void minqpsetalgobleic(const minqpstate &state, const double epsg, const double epsf, const double epsx, const ae_int_t maxits);
3811
3812
3813/*************************************************************************
3814This function tells QP solver to use Dense-AUL algorithm and sets stopping
3815criteria for the algorithm.
3816
3817ALGORITHM FEATURES:
3818
3819* supports dense and sparse QP problems; although it uses dense Cholesky
3820 to build preconditioner, it still works faster for sparse problems.
3821* supports box and dense/sparse general linear equality/inequality
3822 constraints
3823* convergence is theoretically proved for positive-definite (convex) QP
3824 problems. Semidefinite and non-convex problems can be solved as long as
3825 they are bounded from below under constraints, although without
3826 theoretical guarantees.
3827* this solver is better than QP-BLEIC on problems with large number of
3828 general linear constraints.
3829
3830ALGORITHM OUTLINE:
3831
3832* this algorithm is an augmented Lagrangian method with dense
3833 preconditioner (hence its name). It is similar to barrier/penalty
3834 methods, but much more precise and faster.
3835* it performs several outer iterations in order to refine values of the
3836 Lagrange multipliers. Single outer iteration is a solution of some
3837 unconstrained optimization problem: first it performs dense Cholesky
3838 factorization of the Hessian in order to build preconditioner (adaptive
3839 regularization is applied to enforce positive definiteness), and then
3840 it uses L-BFGS optimizer to solve optimization problem.
3841* typically you need about 5-10 outer iterations to converge to solution
3842
3843ALGORITHM LIMITATIONS:
3844
3845* because dense Cholesky driver is used, this algorithm has O(N^2) memory
3846 requirements and O(OuterIterations*N^3) minimum running time. From the
3847 practical point of view, it limits its applicability by several
3848 thousands of variables.
3849 From the other side, variables count is the most limiting factor,
3850  and dependence on constraint count is much lower. Assuming that
3851 constraint matrix is sparse, it may handle tens of thousands of general
3852 linear constraints.
3853* its precision is lower than that of BLEIC-QP and QuickQP. It is hard to
3854 find solution with more than 6 digits of precision.
3855
3856INPUT PARAMETERS:
3857 State - structure which stores algorithm state
3858 EpsX - >=0, stopping criteria for inner optimizer.
3859 Inner iterations are stopped when step length (with
3860 variable scaling being applied) is less than EpsX.
3861 See minqpsetscale() for more information on variable
3862 scaling.
3863 Rho - penalty coefficient, Rho>0:
3864 * large enough that algorithm converges with desired
3865 precision.
3866 * not TOO large to prevent ill-conditioning
3867 * recommended values are 100, 1000 or 10000
3868 ItsCnt - number of outer iterations:
3869 * recommended values: 10-15 (although in most cases it
3870 converges within 5 iterations, you may need a few more
3871 to be sure).
3872 * ItsCnt=0 means that small number of outer iterations is
3873 automatically chosen (10 iterations in current version).
3874 * ItsCnt=1 means that AUL algorithm performs just as usual
3875 barrier method.
3876 * ItsCnt>1 means that AUL algorithm performs specified
3877 number of outer iterations
3878
3879IT IS VERY IMPORTANT TO CALL minqpsetscale() WHEN YOU USE THIS ALGORITHM
3880BECAUSE ITS CONVERGENCE PROPERTIES AND STOPPING CRITERIA ARE SCALE-DEPENDENT!
3881
3882NOTE: Passing EpsX=0 will lead to automatic step length selection
3883 (specific step length chosen may change in the future versions of
3884 ALGLIB, so it is better to specify step length explicitly).
3885
3886 -- ALGLIB --
3887 Copyright 20.08.2016 by Bochkanov Sergey
3888*************************************************************************/
3889void minqpsetalgodenseaul(const minqpstate &state, const double epsx, const double rho, const ae_int_t itscnt);
3890
3891
3892/*************************************************************************
3893This function tells solver to use QuickQP algorithm: special extra-fast
3894algorithm for problems with box-only constraints. It may solve non-convex
3895problems as long as they are bounded from below under constraints.
3896
3897ALGORITHM FEATURES:
3898* many times (from 5x to 50x!) faster than BLEIC-based QP solver; utilizes
3899 accelerated methods for activation of constraints.
3900* supports dense and sparse QP problems
3901* supports ONLY box constraints; general linear constraints are NOT
3902 supported by this solver
3903* can solve all types of problems (convex, semidefinite, nonconvex) as
3904 long as they are bounded from below under constraints.
3905 Say, it is possible to solve "min{-x^2} subject to -1<=x<=+1".
3906 In convex/semidefinite case global minimum is returned, in nonconvex
3907 case - algorithm returns one of the local minimums.
3908
3909ALGORITHM OUTLINE:
3910
3911* algorithm performs two kinds of iterations: constrained CG iterations
3912 and constrained Newton iterations
3913* initially it performs small number of constrained CG iterations, which
3914 can efficiently activate/deactivate multiple constraints
3915* after CG phase algorithm tries to calculate Cholesky decomposition and
3916 to perform several constrained Newton steps. If Cholesky decomposition
3917 failed (matrix is indefinite even under constraints), we perform more
3918 CG iterations until we converge to such set of constraints that system
3919 matrix becomes positive definite. Constrained Newton steps greatly
3920 increase convergence speed and precision.
3921* algorithm interleaves CG and Newton iterations which allows to handle
3922 indefinite matrices (CG phase) and quickly converge after final set of
3923 constraints is found (Newton phase). Combination of CG and Newton phases
3924 is called "outer iteration".
3925* it is possible to turn off Newton phase (beneficial for semidefinite
3926 problems - Cholesky decomposition will fail too often)
3927
3928ALGORITHM LIMITATIONS:
3929
3930* algorithm does not support general linear constraints; only box ones
3931 are supported
3932* Cholesky decomposition for sparse problems is performed with Skyline
3933 Cholesky solver, which is intended for low-profile matrices. No profile-
3934 reducing reordering of variables is performed in this version of ALGLIB.
3935* problems with near-zero negative eigenvalues (or exactly zero ones) may
3936 experience about 2-3x performance penalty. The reason is that Cholesky
3937 decomposition can not be performed until we identify directions of zero
3938 and negative curvature and activate corresponding boundary constraints -
3939 but we need a lot of trial and errors because these directions are hard
3940 to notice in the matrix spectrum.
3941 In this case you may turn off Newton phase of algorithm.
3942 Large negative eigenvalues are not an issue, so highly non-convex
3943 problems can be solved very efficiently.
3944
3945INPUT PARAMETERS:
3946 State - structure which stores algorithm state
3947 EpsG - >=0
3948 The subroutine finishes its work if the condition
3949 |v|<EpsG is satisfied, where:
3950                    * |.| means Euclidean norm
3951 * v - scaled constrained gradient vector, v[i]=g[i]*s[i]
3952 * g - gradient
3953 * s - scaling coefficients set by MinQPSetScale()
3954 EpsF - >=0
3955 The subroutine finishes its work if exploratory steepest
3956 descent step on k+1-th iteration satisfies following
3957 condition: |F(k+1)-F(k)|<=EpsF*max{|F(k)|,|F(k+1)|,1}
3958 EpsX - >=0
3959 The subroutine finishes its work if exploratory steepest
3960 descent step on k+1-th iteration satisfies following
3961                    condition |v|<=EpsX is satisfied, where:
3962                    * |.| means Euclidean norm
3963 * v - scaled step vector, v[i]=dx[i]/s[i]
3964 * dx - step vector, dx=X(k+1)-X(k)
3965 * s - scaling coefficients set by MinQPSetScale()
3966 MaxOuterIts-maximum number of OUTER iterations. One outer iteration
3967 includes some amount of CG iterations (from 5 to ~N) and
3968 one or several (usually small amount) Newton steps. Thus,
3969 one outer iteration has high cost, but can greatly reduce
3970                    function value.
3971 Use 0 if you do not want to limit number of outer iterations.
3972 UseNewton- use Newton phase or not:
3973 * Newton phase improves performance of positive definite
3974 dense problems (about 2 times improvement can be observed)
3975 * can result in some performance penalty on semidefinite
3976 or slightly negative definite problems - each Newton
3977 phase will bring no improvement (Cholesky failure), but
3978 still will require computational time.
3979 * if you doubt, you can turn off this phase - optimizer
3980                    will retain most of its high speed.
3981
3982IT IS VERY IMPORTANT TO CALL MinQPSetScale() WHEN YOU USE THIS ALGORITHM
3983BECAUSE ITS STOPPING CRITERIA ARE SCALE-DEPENDENT!
3984
3985Passing EpsG=0, EpsF=0 and EpsX=0 and MaxOuterIts=0 (simultaneously) will lead
3986to automatic stopping criterion selection (presently it is small step
3987length, but it may change in the future versions of ALGLIB).
3988
3989 -- ALGLIB --
3990 Copyright 22.05.2014 by Bochkanov Sergey
3991*************************************************************************/
3992void minqpsetalgoquickqp(const minqpstate &state, const double epsg, const double epsf, const double epsx, const ae_int_t maxouterits, const bool usenewton);
3993
3994
3995/*************************************************************************
3996This function sets box constraints for QP solver
3997
3998Box constraints are inactive by default (after initial creation). After
3999being set, they are preserved until explicitly turned off with another
4000SetBC() call.
4001
4002All QP solvers may handle box constraints.
4003
4004INPUT PARAMETERS:
4005 State - structure stores algorithm state
4006 BndL - lower bounds, array[N].
4007 If some (all) variables are unbounded, you may specify
4008 very small number or -INF (latter is recommended because
4009 it will allow solver to use better algorithm).
4010 BndU - upper bounds, array[N].
4011 If some (all) variables are unbounded, you may specify
4012 very large number or +INF (latter is recommended because
4013 it will allow solver to use better algorithm).
4014
4015NOTE: it is possible to specify BndL[i]=BndU[i]. In this case I-th
4016variable will be "frozen" at X[i]=BndL[i]=BndU[i].
4017
4018 -- ALGLIB --
4019 Copyright 11.01.2011 by Bochkanov Sergey
4020*************************************************************************/
4021void minqpsetbc(const minqpstate &state, const real_1d_array &bndl, const real_1d_array &bndu);
4022
4023
4024/*************************************************************************
4025This function sets dense linear constraints for QP optimizer.
4026
4027This function overrides results of previous calls to minqpsetlc(),
4028minqpsetlcsparse() and minqpsetlcmixed(). After call to this function
4029sparse constraints are dropped, and you have only those constraints which
4030were specified in the present call.
4031
4032If you want to specify mixed (with dense and sparse terms) linear
4033constraints, you should call minqpsetlcmixed().
4034
4035SUPPORT BY QP SOLVERS:
4036
4037Following QP solvers can handle dense linear constraints:
4038* BLEIC-QP - handles them with high precision, but may be
4039 inefficient for problems with hundreds of constraints
4040* Dense-AUL-QP - handles them with moderate precision (approx. 10^-6),
4041 may efficiently handle thousands of constraints.
4042
4043Following QP solvers can NOT handle dense linear constraints:
4044* QuickQP - can not handle general linear constraints
4045
4046INPUT PARAMETERS:
4047 State - structure previously allocated with MinQPCreate call.
4048 C - linear constraints, array[K,N+1].
4049 Each row of C represents one constraint, either equality
4050 or inequality (see below):
4051 * first N elements correspond to coefficients,
4052 * last element corresponds to the right part.
4053 All elements of C (including right part) must be finite.
4054 CT - type of constraints, array[K]:
4055 * if CT[i]>0, then I-th constraint is C[i,*]*x >= C[i,n+1]
4056 * if CT[i]=0, then I-th constraint is C[i,*]*x = C[i,n+1]
4057 * if CT[i]<0, then I-th constraint is C[i,*]*x <= C[i,n+1]
4058 K - number of equality/inequality constraints, K>=0:
4059 * if given, only leading K elements of C/CT are used
4060 * if not given, automatically determined from sizes of C/CT
4061
4062NOTE 1: linear (non-bound) constraints are satisfied only approximately -
4063 there always exists some violation due to numerical errors and
4064 algorithmic limitations (BLEIC-QP solver is most precise, AUL-QP
4065 solver is less precise).
4066
4067 -- ALGLIB --
4068 Copyright 19.06.2012 by Bochkanov Sergey
4069*************************************************************************/
4070void minqpsetlc(const minqpstate &state, const real_2d_array &c, const integer_1d_array &ct, const ae_int_t k);
4071void minqpsetlc(const minqpstate &state, const real_2d_array &c, const integer_1d_array &ct);
4072
4073
4074/*************************************************************************
4075This function sets sparse linear constraints for QP optimizer.
4076
4077This function overrides results of previous calls to minqpsetlc(),
4078minqpsetlcsparse() and minqpsetlcmixed(). After call to this function
4079dense constraints are dropped, and you have only those constraints which
4080were specified in the present call.
4081
4082If you want to specify mixed (with dense and sparse terms) linear
4083constraints, you should call minqpsetlcmixed().
4084
4085SUPPORT BY QP SOLVERS:
4086
4087Following QP solvers can handle sparse linear constraints:
4088* BLEIC-QP - handles them with high precision, but can not
4089 utilize their sparsity - sparse constraint matrix
4090 is silently converted to dense format. Thus, it
4091 may be inefficient for problems with hundreds of
4092 constraints.
4093* Dense-AUL-QP - although this solver uses dense linear algebra to
4094 calculate Cholesky preconditioner, it may
4095 efficiently handle sparse constraints. It may
4096 solve problems with hundreds and thousands of
4097 constraints. The only drawback is that precision
4098 of constraint handling is typically within 1E-4...
4099 ..1E-6 range.
4100
4101Following QP solvers can NOT handle sparse linear constraints:
4102* QuickQP - can not handle general linear constraints
4103
4104INPUT PARAMETERS:
4105 State - structure previously allocated with MinQPCreate call.
4106 C - linear constraints, sparse matrix with dimensions at
4107 least [K,N+1]. If matrix has larger size, only leading
4108 Kx(N+1) rectangle is used.
4109 Each row of C represents one constraint, either equality
4110 or inequality (see below):
4111 * first N elements correspond to coefficients,
4112 * last element corresponds to the right part.
4113 All elements of C (including right part) must be finite.
4114 CT - type of constraints, array[K]:
4115 * if CT[i]>0, then I-th constraint is C[i,*]*x >= C[i,n+1]
4116 * if CT[i]=0, then I-th constraint is C[i,*]*x = C[i,n+1]
4117 * if CT[i]<0, then I-th constraint is C[i,*]*x <= C[i,n+1]
4118 K - number of equality/inequality constraints, K>=0
4119
4120NOTE 1: linear (non-bound) constraints are satisfied only approximately -
4121 there always exists some violation due to numerical errors and
4122 algorithmic limitations (BLEIC-QP solver is most precise, AUL-QP
4123 solver is less precise).
4124
4125 -- ALGLIB --
4126 Copyright 22.08.2016 by Bochkanov Sergey
4127*************************************************************************/
4128void minqpsetlcsparse(const minqpstate &state, const sparsematrix &c, const integer_1d_array &ct, const ae_int_t k);
4129
4130
4131/*************************************************************************
4132This function sets mixed linear constraints, which include a set of dense
4133rows, and a set of sparse rows.
4134
4135This function overrides results of previous calls to minqpsetlc(),
4136minqpsetlcsparse() and minqpsetlcmixed().
4137
4138This function may be useful if constraint matrix includes large number of
4139both types of rows - dense and sparse. If you have just a few sparse rows,
4140you may represent them in dense format without losing performance.
4141Similarly, if you have just a few dense rows, you may store them in sparse
4142format with almost same performance.
4143
4144SUPPORT BY QP SOLVERS:
4145
4146Following QP solvers can handle mixed dense/sparse linear constraints:
4147* BLEIC-QP - handles them with high precision, but can not
4148 utilize their sparsity - sparse constraint matrix
4149 is silently converted to dense format. Thus, it
4150 may be inefficient for problems with hundreds of
4151 constraints.
4152* Dense-AUL-QP - although this solver uses dense linear algebra to
4153 calculate Cholesky preconditioner, it may
4154 efficiently handle sparse constraints. It may
4155 solve problems with hundreds and thousands of
4156 constraints. The only drawback is that precision
4157 of constraint handling is typically within 1E-4...
4158 ..1E-6 range.
4159
4160Following QP solvers can NOT handle mixed linear constraints:
4161* QuickQP - can not handle general linear constraints at all
4162
4163INPUT PARAMETERS:
4164 State - structure previously allocated with MinQPCreate call.
4165 DenseC - dense linear constraints, array[K,N+1].
4166 Each row of DenseC represents one constraint, either equality
4167 or inequality (see below):
4168 * first N elements correspond to coefficients,
4169 * last element corresponds to the right part.
4170 All elements of DenseC (including right part) must be finite.
4171 DenseCT - type of constraints, array[K]:
4172 * if DenseCT[i]>0, then I-th constraint is DenseC[i,*]*x >= DenseC[i,n+1]
4173 * if DenseCT[i]=0, then I-th constraint is DenseC[i,*]*x = DenseC[i,n+1]
4174 * if DenseCT[i]<0, then I-th constraint is DenseC[i,*]*x <= DenseC[i,n+1]
4175 DenseK - number of equality/inequality constraints, DenseK>=0
4176 SparseC - linear constraints, sparse matrix with dimensions at
4177 least [SparseK,N+1]. If matrix has larger size, only leading
4178 SPARSEKx(N+1) rectangle is used.
4179 Each row of C represents one constraint, either equality
4180 or inequality (see below):
4181 * first N elements correspond to coefficients,
4182 * last element corresponds to the right part.
4183 All elements of C (including right part) must be finite.
4184    SparseCT-   type of sparse constraints, array[SparseK]:
4185 * if SparseCT[i]>0, then I-th constraint is SparseC[i,*]*x >= SparseC[i,n+1]
4186 * if SparseCT[i]=0, then I-th constraint is SparseC[i,*]*x = SparseC[i,n+1]
4187 * if SparseCT[i]<0, then I-th constraint is SparseC[i,*]*x <= SparseC[i,n+1]
4188    SparseK -   number of sparse equality/inequality constraints, SparseK>=0
4189
4190NOTE 1: linear (non-bound) constraints are satisfied only approximately -
4191 there always exists some violation due to numerical errors and
4192 algorithmic limitations (BLEIC-QP solver is most precise, AUL-QP
4193 solver is less precise).
4194
4195 -- ALGLIB --
4196 Copyright 22.08.2016 by Bochkanov Sergey
4197*************************************************************************/
4198void minqpsetlcmixed(const minqpstate &state, const real_2d_array &densec, const integer_1d_array &densect, const ae_int_t densek, const sparsematrix &sparsec, const integer_1d_array &sparsect, const ae_int_t sparsek);
4199
4200
4201/*************************************************************************
4202This function solves quadratic programming problem.
4203
4204Prior to calling this function you should choose solver by means of one of
4205the following functions:
4206
4207* minqpsetalgoquickqp() - for QuickQP solver
4208* minqpsetalgobleic() - for BLEIC-QP solver
4209* minqpsetalgodenseaul() - for Dense-AUL-QP solver
4210
4211These functions also allow you to control stopping criteria of the solver.
4212If you did not set solver, MinQP subpackage will automatically select
4213solver for your problem and will run it with default stopping criteria.
4214
4215However, it is better to set explicitly solver and its stopping criteria.
4216
4217INPUT PARAMETERS:
4218 State - algorithm state
4219
4220You should use MinQPResults() function to access results after calls
4221to this function.
4222
4223 -- ALGLIB --
4224 Copyright 11.01.2011 by Bochkanov Sergey.
4225 Special thanks to Elvira Illarionova for important suggestions on
4226 the linearly constrained QP algorithm.
4227*************************************************************************/
4228void minqpoptimize(const minqpstate &state);
4229
4230
4231/*************************************************************************
4232QP solver results
4233
4234INPUT PARAMETERS:
4235 State - algorithm state
4236
4237OUTPUT PARAMETERS:
4238 X - array[0..N-1], solution.
4239 This array is allocated and initialized only when
4240 Rep.TerminationType parameter is positive (success).
4241 Rep - optimization report. You should check Rep.TerminationType,
4242 which contains completion code, and you may check another
4243 fields which contain another information about algorithm
4244 functioning.
4245
4246 Failure codes returned by algorithm are:
4247 * -5 inappropriate solver was used:
4248 * QuickQP solver for problem with general linear
4249 constraints
4250 * -4 BLEIC-QP/QuickQP solver found unconstrained
4251 direction of negative curvature (function is
4252 unbounded from below even under constraints), no
4253 meaningful minimum can be found.
4254 * -3 inconsistent constraints (or maybe feasible point
4255 is too hard to find). If you are sure that
4256 constraints are feasible, try to restart optimizer
4257 with better initial approximation.
4258
4259 Completion codes specific for Cholesky algorithm:
4260 * 4 successful completion
4261
4262 Completion codes specific for BLEIC/QuickQP algorithms:
4263 * 1 relative function improvement is no more than EpsF.
4264 * 2 scaled step is no more than EpsX.
4265 * 4 scaled gradient norm is no more than EpsG.
4266 * 5 MaxIts steps was taken
4267
4268 -- ALGLIB --
4269 Copyright 11.01.2011 by Bochkanov Sergey
4270*************************************************************************/
4271void minqpresults(const minqpstate &state, real_1d_array &x, minqpreport &rep);
4272
4273
4274/*************************************************************************
4275QP results
4276
4277Buffered implementation of MinQPResults() which uses pre-allocated buffer
4278to store X[]. If buffer size is too small, it resizes buffer. It is
4279intended to be used in the inner cycles of performance critical algorithms
4280where array reallocation penalty is too large to be ignored.
4281
4282 -- ALGLIB --
4283 Copyright 11.01.2011 by Bochkanov Sergey
4284*************************************************************************/
4285void minqpresultsbuf(const minqpstate &state, real_1d_array &x, minqpreport &rep);
4286
4287/*************************************************************************
4288 NONLINEARLY CONSTRAINED OPTIMIZATION
4289 WITH PRECONDITIONED AUGMENTED LAGRANGIAN ALGORITHM
4290
4291DESCRIPTION:
4292The subroutine minimizes function F(x) of N arguments subject to any
4293combination of:
4294* bound constraints
4295* linear inequality constraints
4296* linear equality constraints
4297* nonlinear equality constraints Gi(x)=0
4298* nonlinear inequality constraints Hi(x)<=0
4299
4300REQUIREMENTS:
4301* user must provide function value and gradient for F(), H(), G()
4302* starting point X0 must be feasible or not too far away from the feasible
4303 set
4304* F(), G(), H() are twice continuously differentiable on the feasible set
4305 and its neighborhood
4306* nonlinear constraints G() and H() must have non-zero gradient at G(x)=0
4307 and at H(x)=0. Say, constraint like x^2>=1 is supported, but x^2>=0 is
4308 NOT supported.
4309
4310USAGE:
4311
4312Constrained optimization is far more complex than the unconstrained one.
4313Nonlinearly constrained optimization is one of the most esoteric numerical
4314procedures.
4315
4316Here we give very brief outline of the MinNLC optimizer. We strongly
4317recommend you to study examples in the ALGLIB Reference Manual and to read
4318ALGLIB User Guide on optimization, which is available at
4319http://www.alglib.net/optimization/
4320
43211. User initializes algorithm state with MinNLCCreate() call and chooses
4322 what NLC solver to use. There is some solver which is used by default,
4323 with default settings, but you should NOT rely on default choice. It
4324 may change in future releases of ALGLIB without notice, and no one can
4325 guarantee that new solver will be able to solve your problem with
4326 default settings.
4327
4328 From the other side, if you choose solver explicitly, you can be pretty
4329 sure that it will work with new ALGLIB releases.
4330
4331 In the current release following solvers can be used:
4332 * AUL solver (activated with MinNLCSetAlgoAUL() function)
4333
43342. User adds boundary and/or linear and/or nonlinear constraints by means
4335 of calling one of the following functions:
4336 a) MinNLCSetBC() for boundary constraints
4337 b) MinNLCSetLC() for linear constraints
4338 c) MinNLCSetNLC() for nonlinear constraints
4339 You may combine (a), (b) and (c) in one optimization problem.
4340
43413. User sets scale of the variables with MinNLCSetScale() function. It is
4342 VERY important to set scale of the variables, because nonlinearly
4343 constrained problems are hard to solve when variables are badly scaled.
4344
43454. User sets stopping conditions with MinNLCSetCond(). If NLC solver
4346 uses inner/outer iteration layout, this function sets stopping
4347 conditions for INNER iterations.
4348
43495. User chooses one of the preconditioning methods. Preconditioning is
4350 very important for efficient handling of boundary/linear/nonlinear
4351 constraints. Without preconditioning algorithm would require thousands
4352 of iterations even for simple problems. Several preconditioners can be
4353 used:
4354 a) inexact LBFGS-based, with O(N*K) evaluation time
4355 b) exact low rank one, with O(N*K^2) evaluation time
4356 c) exact robust one, with O(N^3+K*N^2) evaluation time
4357 where K is a total number of general linear and nonlinear constraints
4358 (box ones are not counted).
4359 Since version 3.11.0 ALGLIB uses exact robust preconditioner as default
4360 option, but in some cases exact low rank one may be better option.
4361
43626. Finally, user calls MinNLCOptimize() function which takes algorithm
4363 state and pointer (delegate, etc.) to callback function which calculates
4364 F/G/H.
4365
43667. User calls MinNLCResults() to get solution
4367
43688. Optionally user may call MinNLCRestartFrom() to solve another problem
4369 with same N but another starting point. MinNLCRestartFrom() allows to
4370 reuse already initialized structure.
4371
4372
4373INPUT PARAMETERS:
4374 N - problem dimension, N>0:
4375 * if given, only leading N elements of X are used
4376                * if not given, automatically determined from size of X
4377 X - starting point, array[N]:
4378 * it is better to set X to a feasible point
4379 * but X can be infeasible, in which case algorithm will try
4380 to find feasible point first, using X as initial
4381 approximation.
4382
4383OUTPUT PARAMETERS:
4384 State - structure stores algorithm state
4385
4386 -- ALGLIB --
4387 Copyright 06.06.2014 by Bochkanov Sergey
4388*************************************************************************/
4389void minnlccreate(const ae_int_t n, const real_1d_array &x, minnlcstate &state);
4390void minnlccreate(const real_1d_array &x, minnlcstate &state);
4391
4392
4393/*************************************************************************
4394This subroutine is a finite difference variant of MinNLCCreate(). It uses
4395finite differences in order to differentiate target function.
4396
4397Description below contains information which is specific to this function
4398only. We recommend to read comments on MinNLCCreate() in order to get more
4399information about creation of NLC optimizer.
4400
4401INPUT PARAMETERS:
4402 N - problem dimension, N>0:
4403 * if given, only leading N elements of X are used
4404                * if not given, automatically determined from size of X
4405 X - starting point, array[N]:
4406 * it is better to set X to a feasible point
4407 * but X can be infeasible, in which case algorithm will try
4408 to find feasible point first, using X as initial
4409 approximation.
4410 DiffStep- differentiation step, >0
4411
4412OUTPUT PARAMETERS:
4413 State - structure stores algorithm state
4414
4415NOTES:
44161. algorithm uses 4-point central formula for differentiation.
44172. differentiation step along I-th axis is equal to DiffStep*S[I] where
4418 S[] is scaling vector which can be set by MinNLCSetScale() call.
44193. we recommend you to use moderate values of differentiation step. Too
4420 large step will result in too large TRUNCATION errors, while too small
4421 step will result in too large NUMERICAL errors. 1.0E-4 can be good
4422 value to start from.
44234. Numerical differentiation is very inefficient - one gradient
4424 calculation needs 4*N function evaluations. This function will work for
4425 any N - either small (1...10), moderate (10...100) or large (100...).
4426 However, performance penalty will be too severe for any N's except for
4427 small ones.
4428 We should also say that code which relies on numerical differentiation
4429 is less robust and precise. Imprecise gradient may slow down
4430 convergence, especially on highly nonlinear problems.
4431 Thus we recommend to use this function for fast prototyping on small-
4432 dimensional problems only, and to implement analytical gradient as soon
4433 as possible.
4434
4435 -- ALGLIB --
4436 Copyright 06.06.2014 by Bochkanov Sergey
4437*************************************************************************/
4438void minnlccreatef(const ae_int_t n, const real_1d_array &x, const double diffstep, minnlcstate &state);
4439void minnlccreatef(const real_1d_array &x, const double diffstep, minnlcstate &state);
4440
4441
4442/*************************************************************************
4443This function sets boundary constraints for NLC optimizer.
4444
4445Boundary constraints are inactive by default (after initial creation).
4446They are preserved after algorithm restart with MinNLCRestartFrom().
4447
4448You may combine boundary constraints with general linear ones - and with
4449nonlinear ones! Boundary constraints are handled more efficiently than
4450other types. Thus, if your problem has mixed constraints, you may
4451explicitly specify some of them as boundary and save some time/space.
4452
4453INPUT PARAMETERS:
4454 State - structure stores algorithm state
4455 BndL - lower bounds, array[N].
4456 If some (all) variables are unbounded, you may specify
4457 very small number or -INF.
4458 BndU - upper bounds, array[N].
4459 If some (all) variables are unbounded, you may specify
4460 very large number or +INF.
4461
4462NOTE 1: it is possible to specify BndL[i]=BndU[i]. In this case I-th
4463variable will be "frozen" at X[i]=BndL[i]=BndU[i].
4464
4465NOTE 2: when you solve your problem with augmented Lagrangian solver,
4466 boundary constraints are satisfied only approximately! It is
4467 possible that algorithm will evaluate function outside of
4468 feasible area!
4469
4470 -- ALGLIB --
4471 Copyright 06.06.2014 by Bochkanov Sergey
4472*************************************************************************/
4473void minnlcsetbc(const minnlcstate &state, const real_1d_array &bndl, const real_1d_array &bndu);
4474
4475
4476/*************************************************************************
4477This function sets linear constraints for MinNLC optimizer.
4478
4479Linear constraints are inactive by default (after initial creation). They
4480are preserved after algorithm restart with MinNLCRestartFrom().
4481
4482You may combine linear constraints with boundary ones - and with nonlinear
4483ones! If your problem has mixed constraints, you may explicitly specify
4484some of them as linear. It may help optimizer to handle them more
4485efficiently.
4486
4487INPUT PARAMETERS:
4488 State - structure previously allocated with MinNLCCreate call.
4489 C - linear constraints, array[K,N+1].
4490 Each row of C represents one constraint, either equality
4491 or inequality (see below):
4492 * first N elements correspond to coefficients,
4493 * last element corresponds to the right part.
4494 All elements of C (including right part) must be finite.
4495 CT - type of constraints, array[K]:
4496 * if CT[i]>0, then I-th constraint is C[i,*]*x >= C[i,n+1]
4497 * if CT[i]=0, then I-th constraint is C[i,*]*x = C[i,n+1]
4498 * if CT[i]<0, then I-th constraint is C[i,*]*x <= C[i,n+1]
4499 K - number of equality/inequality constraints, K>=0:
4500 * if given, only leading K elements of C/CT are used
4501 * if not given, automatically determined from sizes of C/CT
4502
4503NOTE 1: when you solve your problem with augmented Lagrangian solver,
4504 linear constraints are satisfied only approximately! It is
4505 possible that algorithm will evaluate function outside of
4506 feasible area!
4507
4508 -- ALGLIB --
4509 Copyright 06.06.2014 by Bochkanov Sergey
4510*************************************************************************/
4511void minnlcsetlc(const minnlcstate &state, const real_2d_array &c, const integer_1d_array &ct, const ae_int_t k);
4512void minnlcsetlc(const minnlcstate &state, const real_2d_array &c, const integer_1d_array &ct);
4513
4514
4515/*************************************************************************
4516This function sets nonlinear constraints for MinNLC optimizer.
4517
4518In fact, this function sets NUMBER of nonlinear constraints. Constraints
4519itself (constraint functions) are passed to MinNLCOptimize() method. This
4520method requires user-defined vector function F[] and its Jacobian J[],
4521where:
4522* first component of F[] and first row of Jacobian J[] corresponds to
4523 function being minimized
4524* next NLEC components of F[] (and rows of J) correspond to nonlinear
4525 equality constraints G_i(x)=0
4526* next NLIC components of F[] (and rows of J) correspond to nonlinear
4527 inequality constraints H_i(x)<=0
4528
4529NOTE: you may combine nonlinear constraints with linear/boundary ones. If
4530 your problem has mixed constraints, you may explicitly specify some
4531 of them as linear ones. It may help optimizer to handle them more
4532 efficiently.
4533
4534INPUT PARAMETERS:
4535 State - structure previously allocated with MinNLCCreate call.
4536 NLEC - number of Non-Linear Equality Constraints (NLEC), >=0
4537 NLIC - number of Non-Linear Inequality Constraints (NLIC), >=0
4538
4539NOTE 1: when you solve your problem with augmented Lagrangian solver,
4540 nonlinear constraints are satisfied only approximately! It is
4541 possible that algorithm will evaluate function outside of
4542 feasible area!
4543
4544NOTE 2: algorithm scales variables according to scale specified by
4545 MinNLCSetScale() function, so it can handle problems with badly
4546 scaled variables (as long as we KNOW their scales).
4547
4548 However, there is no way to automatically scale nonlinear
4549 constraints Gi(x) and Hi(x). Inappropriate scaling of Gi/Hi may
4550 ruin convergence. Solving problem with constraint "1000*G0(x)=0"
4551 is NOT same as solving it with constraint "0.001*G0(x)=0".
4552
4553 It means that YOU are the one who is responsible for correct
4554 scaling of nonlinear constraints Gi(x) and Hi(x). We recommend you
4555 to scale nonlinear constraints in such way that I-th component of
4556 dG/dX (or dH/dx) has approximately unit magnitude (for problems
4557 with unit scale) or has magnitude approximately equal to 1/S[i]
4558 (where S is a scale set by MinNLCSetScale() function).
4559
4560
4561 -- ALGLIB --
4562 Copyright 06.06.2014 by Bochkanov Sergey
4563*************************************************************************/
4564void minnlcsetnlc(const minnlcstate &state, const ae_int_t nlec, const ae_int_t nlic);
4565
4566
4567/*************************************************************************
4568This function sets stopping conditions for inner iterations of optimizer.
4569
4570INPUT PARAMETERS:
4571 State - structure which stores algorithm state
4572 EpsG - >=0
4573 The subroutine finishes its work if the condition
4574 |v|<EpsG is satisfied, where:
4575 * |.| means Euclidean norm
4576 * v - scaled gradient vector, v[i]=g[i]*s[i]
4577 * g - gradient
4578 * s - scaling coefficients set by MinNLCSetScale()
4579 EpsF - >=0
4580 The subroutine finishes its work if on k+1-th iteration
4581 the condition |F(k+1)-F(k)|<=EpsF*max{|F(k)|,|F(k+1)|,1}
4582 is satisfied.
4583 EpsX - >=0
4584 The subroutine finishes its work if on k+1-th iteration
4585 the condition |v|<=EpsX is fulfilled, where:
4586 * |.| means Euclidean norm
4587 * v - scaled step vector, v[i]=dx[i]/s[i]
4588 * dx - step vector, dx=X(k+1)-X(k)
4589 * s - scaling coefficients set by MinNLCSetScale()
4590 MaxIts - maximum number of iterations. If MaxIts=0, the number of
4591 iterations is unlimited.
4592
4593Passing EpsG=0, EpsF=0 and EpsX=0 and MaxIts=0 (simultaneously) will lead
4594to automatic stopping criterion selection.
4595
4596 -- ALGLIB --
4597 Copyright 06.06.2014 by Bochkanov Sergey
4598*************************************************************************/
4599void minnlcsetcond(const minnlcstate &state, const double epsg, const double epsf, const double epsx, const ae_int_t maxits);
4600
4601
4602/*************************************************************************
4603This function sets scaling coefficients for NLC optimizer.
4604
4605ALGLIB optimizers use scaling matrices to test stopping conditions (step
4606size and gradient are scaled before comparison with tolerances). Scale of
4607the I-th variable is a translation invariant measure of:
4608a) "how large" the variable is
4609b) how large the step should be to make significant changes in the function
4610
4611Scaling is also used by finite difference variant of the optimizer - step
4612along I-th axis is equal to DiffStep*S[I].
4613
4614INPUT PARAMETERS:
4615 State - structure stores algorithm state
4616 S - array[N], non-zero scaling coefficients
4617 S[i] may be negative, sign doesn't matter.
4618
4619 -- ALGLIB --
4620 Copyright 06.06.2014 by Bochkanov Sergey
4621*************************************************************************/
4622void minnlcsetscale(const minnlcstate &state, const real_1d_array &s);
4623
4624
4625/*************************************************************************
4626This function sets preconditioner to "inexact LBFGS-based" mode.
4627
4628Preconditioning is very important for convergence of Augmented Lagrangian
4629algorithm because presence of penalty term makes problem ill-conditioned.
4630Difference between performance of preconditioned and unpreconditioned
4631methods can be as large as 100x!
4632
4633MinNLC optimizer may use following preconditioners, each with its own
4634benefits and drawbacks:
4635 a) inexact LBFGS-based, with O(N*K) evaluation time
4636 b) exact low rank one, with O(N*K^2) evaluation time
4637 c) exact robust one, with O(N^3+K*N^2) evaluation time
4638where K is a total number of general linear and nonlinear constraints (box
4639ones are not counted).
4640
4641Inexact LBFGS-based preconditioner uses L-BFGS formula combined with
4642orthogonality assumption to perform very fast updates. For a N-dimensional
4643problem with K general linear or nonlinear constraints (boundary ones are
4644not counted) it has O(N*K) cost per iteration. This preconditioner has
4645best quality (less iterations) when general linear and nonlinear
4646constraints are orthogonal to each other (orthogonality with respect to
4647boundary constraints is not required). Number of iterations increases when
4648constraints are non-orthogonal, because algorithm assumes orthogonality,
4649but still it is better than no preconditioner at all.
4650
4651INPUT PARAMETERS:
4652 State - structure stores algorithm state
4653
4654 -- ALGLIB --
4655 Copyright 26.09.2014 by Bochkanov Sergey
4656*************************************************************************/
4657void minnlcsetprecinexact(const minnlcstate &state);
4658
4659
4660/*************************************************************************
4661This function sets preconditioner to "exact low rank" mode.
4662
4663Preconditioning is very important for convergence of Augmented Lagrangian
4664algorithm because presence of penalty term makes problem ill-conditioned.
4665Difference between performance of preconditioned and unpreconditioned
4666methods can be as large as 100x!
4667
4668MinNLC optimizer may use following preconditioners, each with its own
4669benefits and drawbacks:
4670 a) inexact LBFGS-based, with O(N*K) evaluation time
4671 b) exact low rank one, with O(N*K^2) evaluation time
4672 c) exact robust one, with O(N^3+K*N^2) evaluation time
4673where K is a total number of general linear and nonlinear constraints (box
4674ones are not counted).
4675
4676It also provides special unpreconditioned mode of operation which can be
4677used for test purposes. Comments below discuss low rank preconditioner.
4678
4679Exact low-rank preconditioner uses Woodbury matrix identity to build
4680quadratic model of the penalized function. It has following features:
4681* no special assumptions about orthogonality of constraints
4682* preconditioner evaluation is optimized for K<<N. Its cost is O(N*K^2),
4683 so it may become prohibitively slow for K>=N.
4684* finally, stability of the process is guaranteed only for K<<N. Woodbury
4685 update often fail for K>=N due to degeneracy of intermediate matrices.
4686 That's why we recommend to use "exact robust" preconditioner for such
4687 cases.
4688
4689RECOMMENDATIONS
4690
4691We recommend to choose between "exact low rank" and "exact robust"
4692preconditioners, with "low rank" version being chosen when you know in
4693advance that total count of non-box constraints won't exceed N, and "robust"
4694version being chosen when you need bulletproof solution.
4695
4696INPUT PARAMETERS:
4697 State - structure stores algorithm state
4698 UpdateFreq- update frequency. Preconditioner is rebuilt after every
4699 UpdateFreq iterations. Recommended value: 10 or higher.
4700 Zero value means that good default value will be used.
4701
4702 -- ALGLIB --
4703 Copyright 26.09.2014 by Bochkanov Sergey
4704*************************************************************************/
4705void minnlcsetprecexactlowrank(const minnlcstate &state, const ae_int_t updatefreq);
4706
4707
4708/*************************************************************************
4709This function sets preconditioner to "exact robust" mode.
4710
4711Preconditioning is very important for convergence of Augmented Lagrangian
4712algorithm because presence of penalty term makes problem ill-conditioned.
4713Difference between performance of preconditioned and unpreconditioned
4714methods can be as large as 100x!
4715
4716MinNLC optimizer may use following preconditioners, each with its own
4717benefits and drawbacks:
4718 a) inexact LBFGS-based, with O(N*K) evaluation time
4719 b) exact low rank one, with O(N*K^2) evaluation time
4720 c) exact robust one, with O(N^3+K*N^2) evaluation time
4721where K is a total number of general linear and nonlinear constraints (box
4722ones are not counted).
4723
4724It also provides special unpreconditioned mode of operation which can be
4725used for test purposes. Comments below discuss robust preconditioner.
4726
4727Exact robust preconditioner uses Cholesky decomposition to invert
4728approximate Hessian matrix H=D+W'*C*W (where D stands for diagonal terms
4729of Hessian, combined result of initial scaling matrix and penalty from box
4730constraints; W stands for general linear constraints and linearization of
4731nonlinear ones; C stands for diagonal matrix of penalty coefficients).
4732
4733This preconditioner has following features:
4734* no special assumptions about constraint structure
4735* preconditioner is optimized for stability; unlike "exact low rank"
4736 version which fails for K>=N, this one works well for any value of K.
4737* the only drawback is that it takes O(N^3+K*N^2) time to build it. No
4738 economical Woodbury update is applied even when it makes sense, thus
4739 there exist situations (K<<N) when "exact low rank" preconditioner
4740 outperforms this one.
4741
4742RECOMMENDATIONS
4743
4744We recommend to choose between "exact low rank" and "exact robust"
4745preconditioners, with "low rank" version being chosen when you know in
4746advance that total count of non-box constraints won't exceed N, and "robust"
4747version being chosen when you need bulletproof solution.
4748
4749INPUT PARAMETERS:
4750 State - structure stores algorithm state
4751 UpdateFreq- update frequency. Preconditioner is rebuilt after every
4752 UpdateFreq iterations. Recommended value: 10 or higher.
4753 Zero value means that good default value will be used.
4754
4755 -- ALGLIB --
4756 Copyright 26.09.2014 by Bochkanov Sergey
4757*************************************************************************/
4758void minnlcsetprecexactrobust(const minnlcstate &state, const ae_int_t updatefreq);
4759
4760
4761/*************************************************************************
4762This function sets preconditioner to "turned off" mode.
4763
4764Preconditioning is very important for convergence of Augmented Lagrangian
4765algorithm because presence of penalty term makes problem ill-conditioned.
4766Difference between performance of preconditioned and unpreconditioned
4767methods can be as large as 100x!
4768
4769MinNLC optimizer may utilize two preconditioners, each with its own
4770benefits and drawbacks: a) inexact LBFGS-based, and b) exact low rank one.
4771It also provides special unpreconditioned mode of operation which can be
4772used for test purposes.
4773
4774This function activates this test mode. Do not use it in production code
4775to solve real-life problems.
4776
4777INPUT PARAMETERS:
4778 State - structure stores algorithm state
4779
4780 -- ALGLIB --
4781 Copyright 26.09.2014 by Bochkanov Sergey
4782*************************************************************************/
4783void minnlcsetprecnone(const minnlcstate &state);
4784
4785
4786/*************************************************************************
4787This function sets maximum step length (after scaling of step vector with
4788respect to variable scales specified by minnlcsetscale() call).
4789
4790INPUT PARAMETERS:
4791 State - structure which stores algorithm state
4792 StpMax - maximum step length, >=0. Set StpMax to 0.0 (default), if
4793 you don't want to limit step length.
4794
4795Use this subroutine when you optimize target function which contains exp()
4796or other fast growing functions, and optimization algorithm makes too
4797large steps which leads to overflow. This function allows us to reject
4798steps that are too large (and therefore expose us to the possible
4799overflow) without actually calculating function value at the x+stp*d.
4800
4801 -- ALGLIB --
4802 Copyright 02.04.2010 by Bochkanov Sergey
4803*************************************************************************/
4804void minnlcsetstpmax(const minnlcstate &state, const double stpmax);
4805
4806
4807/*************************************************************************
4808This function tells MinNLC unit to use Augmented Lagrangian algorithm
4809for nonlinearly constrained optimization. This algorithm is a slight
4810modification of one described in "A Modified Barrier-Augmented Lagrangian
4811Method for Constrained Minimization (1999)" by D.GOLDFARB, R.POLYAK,
4812K. SCHEINBERG, I.YUZEFOVICH.
4813
4814Augmented Lagrangian algorithm works by converting problem of minimizing
4815F(x) subject to equality/inequality constraints to unconstrained problem
4816of the form
4817
4818 min[ f(x) +
4819 + Rho*PENALTY_EQ(x) + SHIFT_EQ(x,Nu1) +
4820 + Rho*PENALTY_INEQ(x) + SHIFT_INEQ(x,Nu2) ]
4821
4822where:
4823* Rho is a fixed penalization coefficient
4824* PENALTY_EQ(x) is a penalty term, which is used to APPROXIMATELY enforce
4825 equality constraints
4826* SHIFT_EQ(x) is a special "shift" term which is used to "fine-tune"
4827 equality constraints, greatly increasing precision
4828* PENALTY_INEQ(x) is a penalty term which is used to approximately enforce
4829 inequality constraints
4830* SHIFT_INEQ(x) is a special "shift" term which is used to "fine-tune"
4831 inequality constraints, greatly increasing precision
4832* Nu1/Nu2 are vectors of Lagrange coefficients which are fine-tuned during
4833 outer iterations of algorithm
4834
4835This version of AUL algorithm uses preconditioner, which greatly
4836accelerates convergence. Because this algorithm is similar to penalty
4837methods, it may perform steps into infeasible area. All kinds of
4838constraints (boundary, linear and nonlinear ones) may be violated in
4839intermediate points - and in the solution. However, properly configured
4840AUL method is significantly better at handling constraints than barrier
4841and/or penalty methods.
4842
4843The very basic outline of algorithm is given below:
48441) first outer iteration is performed with "default" values of Lagrange
4845 multipliers Nu1/Nu2. Solution quality is low (candidate point can be
4846 too far away from true solution; large violation of constraints is
4847 possible) and is comparable with that of penalty methods.
48482) subsequent outer iterations refine Lagrange multipliers and improve
4849 quality of the solution.
4850
4851INPUT PARAMETERS:
4852 State - structure which stores algorithm state
4853 Rho - penalty coefficient, Rho>0:
4854 * large enough that algorithm converges with desired
4855 precision. Minimum value is 10*max(S'*diag(H)*S), where
4856 S is a scale matrix (set by MinNLCSetScale) and H is a
4857 Hessian of the function being minimized. If you can not
4858 easily estimate Hessian norm, see our recommendations
4859 below.
4860 * not TOO large to prevent ill-conditioning
4861 * for unit-scale problems (variables and Hessian have unit
4862 magnitude), Rho=100 or Rho=1000 can be used.
4863 * it is important to note that Rho is internally multiplied
4864 by scaling matrix, i.e. optimum value of Rho depends on
4865 scale of variables specified by MinNLCSetScale().
4866 ItsCnt - number of outer iterations:
4867 * ItsCnt=0 means that small number of outer iterations is
4868 automatically chosen (10 iterations in current version).
4869 * ItsCnt=1 means that AUL algorithm performs just as usual
4870 barrier method.
4871 * ItsCnt>1 means that AUL algorithm performs specified
4872 number of outer iterations
4873
4874HOW TO CHOOSE PARAMETERS
4875
4876Nonlinear optimization is a tricky area and Augmented Lagrangian algorithm
4877is sometimes hard to tune. Good values of Rho and ItsCnt are problem-
4878specific. In order to help you we prepared following set of
4879recommendations:
4880
4881* for unit-scale problems (variables and Hessian have unit magnitude),
4882 Rho=100 or Rho=1000 can be used.
4883
4884* start from some small value of Rho and solve problem with just one
4885 outer iteration (ItsCnt=1). In this case algorithm behaves like penalty
4886 method. Increase Rho in 2x or 10x steps until you see that one outer
4887 iteration returns point which is "rough approximation to solution".
4888
4889 It is very important to have Rho so large that penalty term becomes
4890 constraining i.e. modified function becomes highly convex in constrained
4891 directions.
4892
4893 From the other side, too large Rho may prevent you from converging to
4894 the solution. You can diagnose it by studying number of inner iterations
4895 performed by algorithm: too few (5-10 on 1000-dimensional problem) or
4896 too many (orders of magnitude more than dimensionality) usually means
4897 that Rho is too large.
4898
4899* with just one outer iteration you usually have low-quality solution.
4900 Some constraints can be violated with very large margin, while other
4901 ones (which are NOT violated in the true solution) can push final point
4902 too far in the inner area of the feasible set.
4903
4904 For example, if you have constraint x0>=0 and true solution x0=1, then
4905 merely a presence of "x0>=0" will introduce a bias towards larger values
4906 of x0. Say, algorithm may stop at x0=1.5 instead of 1.0.
4907
4908* after you found good Rho, you may increase number of outer iterations.
4909 ItsCnt=10 is a good value. Subsequent outer iteration will refine values
4910 of Lagrange multipliers. Constraints which were violated will be
4911 enforced, inactive constraints will be dropped (corresponding multipliers
4912 will be decreased). Ideally, you should see 10-1000x improvement in
4913 constraint handling (constraint violation is reduced).
4914
4915* if you see that algorithm converges to vicinity of solution, but
4916 additional outer iterations do not refine solution, it may mean that
4917 algorithm is unstable - it wanders around true solution, but can not
4918 approach it. Sometimes algorithm may be stabilized by increasing Rho one
4919 more time, making it 5x or 10x larger.
4920
4921SCALING OF CONSTRAINTS [IMPORTANT]
4922
4923AUL optimizer scales variables according to scale specified by
4924MinNLCSetScale() function, so it can handle problems with badly scaled
4925variables (as long as we KNOW their scales). However, because function
4926being optimized is a mix of original function and constraint-dependent
4927penalty functions, it is important to rescale both variables AND
4928constraints.
4929
4930Say, if you minimize f(x)=x^2 subject to 1000000*x>=0, then you have
4931constraint whose scale is different from that of target function (another
4932example is 0.000001*x>=0). It is also possible to have constraints whose
4933scales are misaligned: 1000000*x0>=0, 0.000001*x1<=0. Inappropriate
4934scaling may ruin convergence because minimizing x^2 subject to x>=0 is NOT
4935same as minimizing it subject to 1000000*x>=0.
4936
4937Because we know coefficients of boundary/linear constraints, we can
4938automatically rescale and normalize them. However, there is no way to
4939automatically rescale nonlinear constraints Gi(x) and Hi(x) - they are
4940black boxes.
4941
4942It means that YOU are the one who is responsible for correct scaling of
4943nonlinear constraints Gi(x) and Hi(x). We recommend you to rescale
4944nonlinear constraints in such way that I-th component of dG/dX (or dH/dx)
4945has magnitude approximately equal to 1/S[i] (where S is a scale set by
4946MinNLCSetScale() function).
4947
4948WHAT IF IT DOES NOT CONVERGE?
4949
4950It is possible that AUL algorithm fails to converge to precise values of
4951Lagrange multipliers. It stops somewhere around true solution, but candidate
4952point is still too far from solution, and some constraints are violated.
4953Such kind of failure is specific for Lagrangian algorithms - technically,
4954they stop at some point, but this point is not constrained solution.
4955
4956There exist several reasons why algorithm may fail to converge:
4957a) too loose stopping criteria for inner iteration
4958b) degenerate, redundant constraints
4959c) target function has unconstrained extremum exactly at the boundary of
4960 some constraint
4961d) numerical noise in the target function
4962
4963In all these cases algorithm is unstable - each outer iteration results in
4964large and almost random step which improves handling of some constraints,
4965but violates other ones (ideally outer iterations should form a sequence
4966of progressively decreasing steps towards solution).
4967
4968First reason possible is that too loose stopping criteria for inner
4969iteration were specified. Augmented Lagrangian algorithm solves a sequence
4970 of intermediate problems, and requires each of them to be solved with high
4971precision. Insufficient precision results in incorrect update of Lagrange
4972multipliers.
4973
4974Another reason is that you may have specified degenerate constraints: say,
4975some constraint was repeated twice. In most cases AUL algorithm gracefully
4976handles such situations, but sometimes it may spend too much time figuring
4977out subtle degeneracies in constraint matrix.
4978
4979Third reason is tricky and hard to diagnose. Consider situation when you
4980minimize f=x^2 subject to constraint x>=0. Unconstrained extremum is
4981located exactly at the boundary of constrained area. In this case
4982algorithm will tend to oscillate between negative and positive x. Each
4983time it stops at x<0 it "reinforces" constraint x>=0, and each time it is
4984bounced to x>0 it "relaxes" constraint (and is attracted to x<0).
4985
4986Such situation sometimes happens in problems with hidden symmetries.
4987Algorithm gets caught in a loop with Lagrange multipliers being
4988continuously increased/decreased. Luckily, such loop forms after at least
4989three iterations, so this problem can be solved by DECREASING number of
4990outer iterations down to 1-2 and increasing penalty coefficient Rho as
4991much as possible.
4992
4993Final reason is numerical noise. AUL algorithm is robust against moderate
4994noise (more robust than, say, active set methods), but large noise may
4995destabilize algorithm.
4996
4997 -- ALGLIB --
4998 Copyright 06.06.2014 by Bochkanov Sergey
4999*************************************************************************/
5000void minnlcsetalgoaul(const minnlcstate &state, const double rho, const ae_int_t itscnt);
5001
5002
5003/*************************************************************************
5004This function turns on/off reporting.
5005
5006INPUT PARAMETERS:
5007 State - structure which stores algorithm state
5008 NeedXRep- whether iteration reports are needed or not
5009
5010If NeedXRep is True, algorithm will call rep() callback function if it is
5011provided to MinNLCOptimize().
5012
5013NOTE: algorithm passes two parameters to rep() callback - current point
5014 and penalized function value at current point. Important - function
5015 value which is returned is NOT function being minimized. It is sum
5016 of the value of the function being minimized - and penalty term.
5017
5018 -- ALGLIB --
5019 Copyright 28.11.2010 by Bochkanov Sergey
5020*************************************************************************/
5021void minnlcsetxrep(const minnlcstate &state, const bool needxrep);
5022
5023
5024/*************************************************************************
5025This function provides reverse communication interface
5026Reverse communication interface is not documented or recommended to use.
5027See below for functions which provide better documented API
5028*************************************************************************/
5029bool minnlciteration(const minnlcstate &state);
5030
5031
5032/*************************************************************************
5033This family of functions is used to launch iterations of nonlinear optimizer
5034
5035These functions accept following parameters:
5036 state - algorithm state
5037 fvec - callback which calculates function vector fi[]
5038 at given point x
5039 jac - callback which calculates function vector fi[]
5040 and Jacobian jac at given point x
5041 rep - optional callback which is called after each iteration
5042 can be NULL
5043 ptr - optional pointer which is passed to func/grad/hess/jac/rep
5044 can be NULL
5045
5046
5047NOTES:
5048
50491. This function has two different implementations: one which uses exact
5050 (analytical) user-supplied Jacobian, and one which uses only function
5051 vector and numerically differentiates function in order to obtain
5052 gradient.
5053
5054 Depending on the specific function used to create optimizer object
5055 you should choose appropriate variant of MinNLCOptimize() - one which
5056 accepts function AND Jacobian or one which accepts ONLY function.
5057
5058 Be careful to choose variant of MinNLCOptimize() which corresponds to
5059 your optimization scheme! Table below lists different combinations of
5060 callback (function/gradient) passed to MinNLCOptimize() and specific
5061 function used to create optimizer.
5062
5063
5064 | USER PASSED TO MinNLCOptimize()
5065 CREATED WITH | function only | function and gradient
5066 ------------------------------------------------------------
5067 MinNLCCreateF() | works FAILS
5068 MinNLCCreate() | FAILS works
5069
5070 Here "FAILS" denotes inappropriate combinations of optimizer creation
5071 function and MinNLCOptimize() version. Attempts to use such
5072 combination will lead to exception. Either you did not pass gradient
5073 when it WAS needed or you passed gradient when it was NOT needed.
5074
5075 -- ALGLIB --
5076 Copyright 06.06.2014 by Bochkanov Sergey
5077
5078*************************************************************************/
5079void minnlcoptimize(minnlcstate &state,
5080 void (*fvec)(const real_1d_array &x, real_1d_array &fi, void *ptr),
5081 void (*rep)(const real_1d_array &x, double func, void *ptr) = NULL,
5082 void *ptr = NULL);
5083void minnlcoptimize(minnlcstate &state,
5084 void (*jac)(const real_1d_array &x, real_1d_array &fi, real_2d_array &jac, void *ptr),
5085 void (*rep)(const real_1d_array &x, double func, void *ptr) = NULL,
5086 void *ptr = NULL);
5087
5088
5089/*************************************************************************
5090MinNLC results
5091
5092INPUT PARAMETERS:
5093 State - algorithm state
5094
5095OUTPUT PARAMETERS:
5096 X - array[0..N-1], solution
5097 Rep - optimization report. You should check Rep.TerminationType
5098 in order to distinguish successful termination from
5099 unsuccessful one:
5100 * -8 internal integrity control detected infinite or
5101 NAN values in function/gradient. Abnormal
5102 termination signalled.
5103 * -7 gradient verification failed.
5104 See MinNLCSetGradientCheck() for more information.
5105 * 1 relative function improvement is no more than EpsF.
5106 * 2 scaled step is no more than EpsX.
5107 * 4 scaled gradient norm is no more than EpsG.
5108 * 5 MaxIts steps were taken
5109 More information about fields of this structure can be
5110 found in the comments on MinNLCReport datatype.
5111
5112 -- ALGLIB --
5113 Copyright 06.06.2014 by Bochkanov Sergey
5114*************************************************************************/
5115void minnlcresults(const minnlcstate &state, real_1d_array &x, minnlcreport &rep);
5116
5117
5118/*************************************************************************
5119NLC results
5120
5121Buffered implementation of MinNLCResults() which uses pre-allocated buffer
5122to store X[]. If buffer size is too small, it resizes buffer. It is
5123intended to be used in the inner cycles of performance critical algorithms
5124where array reallocation penalty is too large to be ignored.
5125
5126 -- ALGLIB --
5127 Copyright 28.11.2010 by Bochkanov Sergey
5128*************************************************************************/
5129void minnlcresultsbuf(const minnlcstate &state, real_1d_array &x, minnlcreport &rep);
5130
5131
5132/*************************************************************************
5133This subroutine restarts algorithm from new point.
5134All optimization parameters (including constraints) are left unchanged.
5135
5136This function allows to solve multiple optimization problems (which
5137must have same number of dimensions) without object reallocation penalty.
5138
5139INPUT PARAMETERS:
5140 State - structure previously allocated with MinNLCCreate call.
5141 X - new starting point.
5142
5143 -- ALGLIB --
5144 Copyright 28.11.2010 by Bochkanov Sergey
5145*************************************************************************/
5146void minnlcrestartfrom(const minnlcstate &state, const real_1d_array &x);
5147
5148
5149/*************************************************************************
5150This subroutine turns on verification of the user-supplied analytic
5151gradient:
5152* user calls this subroutine before optimization begins
5153* MinNLCOptimize() is called
5154* prior to actual optimization, for each component of parameters being
5155 optimized X[i] algorithm performs following steps:
5156 * two trial steps are made to X[i]-TestStep*S[i] and X[i]+TestStep*S[i],
5157 where X[i] is i-th component of the initial point and S[i] is a scale
5158 of i-th parameter
5159 * F(X) is evaluated at these trial points
5160 * we perform one more evaluation in the middle point of the interval
5161 * we build cubic model using function values and derivatives at trial
5162 points and we compare its prediction with actual value in the middle
5163 point
5164 * in case difference between prediction and actual value is higher than
5165 some predetermined threshold, algorithm stops with completion code -7;
5166 Rep.VarIdx is set to index of the parameter with incorrect derivative,
5167 and Rep.FuncIdx is set to index of the function.
5168* after verification is over, algorithm proceeds to the actual optimization.
5169
5170NOTE 1: verification needs N (parameters count) gradient evaluations. It
5171 is very costly and you should use it only for low dimensional
5172 problems, when you want to be sure that you've correctly
5173 calculated analytic derivatives. You should not use it in the
5174 production code (unless you want to check derivatives provided by
5175 some third party).
5176
5177NOTE 2: you should carefully choose TestStep. Value which is too large
5178 (so large that function behaviour is significantly non-cubic) will
5179 lead to false alarms. You may use different step for different
5180 parameters by means of setting scale with MinNLCSetScale().
5181
5182NOTE 3: this function may lead to false positives. In case it reports that
5183 I-th derivative was calculated incorrectly, you may decrease test
5184 step and try one more time - maybe your function changes too
5185 sharply and your step is too large for such rapidly changing
5186 function.
5187
5188INPUT PARAMETERS:
5189 State - structure used to store algorithm state
5190 TestStep - verification step:
5191 * TestStep=0 turns verification off
5192 * TestStep>0 activates verification
5193
5194 -- ALGLIB --
5195 Copyright 15.06.2014 by Bochkanov Sergey
5196*************************************************************************/
5197void minnlcsetgradientcheck(const minnlcstate &state, const double teststep);
5198
5199/*************************************************************************
5200 BOX CONSTRAINED OPTIMIZATION
5201 WITH FAST ACTIVATION OF MULTIPLE BOX CONSTRAINTS
5202
5203DESCRIPTION:
5204The subroutine minimizes function F(x) of N arguments subject to box
5205constraints (with some of box constraints actually being equality ones).
5206
5207This optimizer uses algorithm similar to that of MinBLEIC (optimizer with
5208general linear constraints), but presence of box-only constraints allows
5209us to use faster constraint activation strategies. On large-scale problems,
5210with multiple constraints active at the solution, this optimizer can be
5211several times faster than BLEIC.
5212
5213REQUIREMENTS:
5214* user must provide function value and gradient
5215* starting point X0 must be feasible or
5216 not too far away from the feasible set
5217* grad(f) must be Lipschitz continuous on a level set:
5218 L = { x : f(x)<=f(x0) }
5219* function must be defined everywhere on the feasible set F
5220
5221USAGE:
5222
5223Constrained optimization is far more complex than the unconstrained one.
5224Here we give very brief outline of the BC optimizer. We strongly recommend
5225you to read examples in the ALGLIB Reference Manual and to read ALGLIB User Guide
5226on optimization, which is available at http://www.alglib.net/optimization/
5227
52281. User initializes algorithm state with MinBCCreate() call
5229
52302. User adds box constraints by calling MinBCSetBC() function.
5231
52323. User sets stopping conditions with MinBCSetCond().
5233
52344. User calls MinBCOptimize() function which takes algorithm state and
5235 pointer (delegate, etc.) to callback function which calculates F/G.
5236
52375. User calls MinBCResults() to get solution
5238
52396. Optionally user may call MinBCRestartFrom() to solve another problem
5240 with same N but another starting point.
5241 MinBCRestartFrom() allows to reuse already initialized structure.
5242
5243
5244INPUT PARAMETERS:
5245 N - problem dimension, N>0:
5246 * if given, only leading N elements of X are used
5247 * if not given, automatically determined from size of X
5248 X - starting point, array[N]:
5249 * it is better to set X to a feasible point
5250 * but X can be infeasible, in which case algorithm will try
5251 to find feasible point first, using X as initial
5252 approximation.
5253
5254OUTPUT PARAMETERS:
5255 State - structure stores algorithm state
5256
5257 -- ALGLIB --
5258 Copyright 28.11.2010 by Bochkanov Sergey
5259*************************************************************************/
5260void minbccreate(const ae_int_t n, const real_1d_array &x, minbcstate &state);
5261void minbccreate(const real_1d_array &x, minbcstate &state);
5262
5263
5264/*************************************************************************
5265The subroutine is finite difference variant of MinBCCreate(). It uses
5266finite differences in order to differentiate target function.
5267
5268Description below contains information which is specific to this function
5269only. We recommend to read comments on MinBCCreate() in order to get
5270more information about creation of BC optimizer.
5271
5272INPUT PARAMETERS:
5273 N - problem dimension, N>0:
5274 * if given, only leading N elements of X are used
5275 * if not given, automatically determined from size of X
5276 X - starting point, array[0..N-1].
5277 DiffStep- differentiation step, >0
5278
5279OUTPUT PARAMETERS:
5280 State - structure which stores algorithm state
5281
5282NOTES:
52831. algorithm uses 4-point central formula for differentiation.
52842. differentiation step along I-th axis is equal to DiffStep*S[I] where
5285 S[] is scaling vector which can be set by MinBCSetScale() call.
52863. we recommend you to use moderate values of differentiation step. Too
5287 large step will result in too large truncation errors, while too small
5288 step will result in too large numerical errors. 1.0E-6 can be good
5289 value to start with.
52904. Numerical differentiation is very inefficient - one gradient
5291 calculation needs 4*N function evaluations. This function will work for
5292 any N - either small (1...10), moderate (10...100) or large (100...).
5293 However, performance penalty will be too severe for any N's except for
5294 small ones.
5295 We should also say that code which relies on numerical differentiation
5296 is less robust and precise. CG needs exact gradient values. Imprecise
5297 gradient may slow down convergence, especially on highly nonlinear
5298 problems.
5299 Thus we recommend to use this function for fast prototyping on small-
5300 dimensional problems only, and to implement analytical gradient as soon
5301 as possible.
5302
5303 -- ALGLIB --
5304 Copyright 16.05.2011 by Bochkanov Sergey
5305*************************************************************************/
5306void minbccreatef(const ae_int_t n, const real_1d_array &x, const double diffstep, minbcstate &state);
5307void minbccreatef(const real_1d_array &x, const double diffstep, minbcstate &state);
5308
5309
5310/*************************************************************************
5311This function sets boundary constraints for BC optimizer.
5312
5313Boundary constraints are inactive by default (after initial creation).
5314They are preserved after algorithm restart with MinBCRestartFrom().
5315
5316INPUT PARAMETERS:
5317 State - structure stores algorithm state
5318 BndL - lower bounds, array[N].
5319 If some (all) variables are unbounded, you may specify
5320 very small number or -INF.
5321 BndU - upper bounds, array[N].
5322 If some (all) variables are unbounded, you may specify
5323 very large number or +INF.
5324
5325NOTE 1: it is possible to specify BndL[i]=BndU[i]. In this case I-th
5326variable will be "frozen" at X[i]=BndL[i]=BndU[i].
5327
5328NOTE 2: this solver has following useful properties:
5329* bound constraints are always satisfied exactly
5330* function is evaluated only INSIDE area specified by bound constraints,
5331 even when numerical differentiation is used (algorithm adjusts nodes
5332 according to boundary constraints)
5333
5334 -- ALGLIB --
5335 Copyright 28.11.2010 by Bochkanov Sergey
5336*************************************************************************/
5337void minbcsetbc(const minbcstate &state, const real_1d_array &bndl, const real_1d_array &bndu);
5338
5339
5340/*************************************************************************
5341This function sets stopping conditions for the optimizer.
5342
5343INPUT PARAMETERS:
5344 State - structure which stores algorithm state
5345 EpsG - >=0
5346 The subroutine finishes its work if the condition
5347 |v|<EpsG is satisfied, where:
5348 * |.| means Euclidean norm
5349 * v - scaled gradient vector, v[i]=g[i]*s[i]
5350 * g - gradient
5351 * s - scaling coefficients set by MinBCSetScale()
5352 EpsF - >=0
5353 The subroutine finishes its work if on k+1-th iteration
5354 the condition |F(k+1)-F(k)|<=EpsF*max{|F(k)|,|F(k+1)|,1}
5355 is satisfied.
5356 EpsX - >=0
5357 The subroutine finishes its work if on k+1-th iteration
5358 the condition |v|<=EpsX is fulfilled, where:
5359 * |.| means Euclidean norm
5360 * v - scaled step vector, v[i]=dx[i]/s[i]
5361 * dx - step vector, dx=X(k+1)-X(k)
5362 * s - scaling coefficients set by MinBCSetScale()
5363 MaxIts - maximum number of iterations. If MaxIts=0, the number of
5364 iterations is unlimited.
5365
5366Passing EpsG=0, EpsF=0 and EpsX=0 and MaxIts=0 (simultaneously) will lead
5367to automatic stopping criterion selection.
5368
5369NOTE: when SetCond() called with non-zero MaxIts, BC solver may perform
5370 slightly more than MaxIts iterations. I.e., MaxIts sets non-strict
5371 limit on iterations count.
5372
5373 -- ALGLIB --
5374 Copyright 28.11.2010 by Bochkanov Sergey
5375*************************************************************************/
5376void minbcsetcond(const minbcstate &state, const double epsg, const double epsf, const double epsx, const ae_int_t maxits);
5377
5378
5379/*************************************************************************
5380This function sets scaling coefficients for BC optimizer.
5381
5382ALGLIB optimizers use scaling matrices to test stopping conditions (step
5383size and gradient are scaled before comparison with tolerances). Scale of
5384the I-th variable is a translation invariant measure of:
5385a) "how large" the variable is
5386b) how large the step should be to make significant changes in the function
5387
5388Scaling is also used by finite difference variant of the optimizer - step
5389along I-th axis is equal to DiffStep*S[I].
5390
5391In most optimizers (and in the BC too) scaling is NOT a form of
5392preconditioning. It just affects stopping conditions. You should set
5393preconditioner by separate call to one of the MinBCSetPrec...()
5394functions.
5395
5396There is a special preconditioning mode, however, which uses scaling
5397coefficients to form diagonal preconditioning matrix. You can turn this
5398mode on, if you want. But you should understand that scaling is not the
5399same thing as preconditioning - these are two different, although related
5400forms of tuning solver.
5401
5402INPUT PARAMETERS:
5403 State - structure stores algorithm state
5404 S - array[N], non-zero scaling coefficients
5405 S[i] may be negative, sign doesn't matter.
5406
5407 -- ALGLIB --
5408 Copyright 14.01.2011 by Bochkanov Sergey
5409*************************************************************************/
5410void minbcsetscale(const minbcstate &state, const real_1d_array &s);
5411
5412
5413/*************************************************************************
5414Modification of the preconditioner: preconditioning is turned off.
5415
5416INPUT PARAMETERS:
5417 State - structure which stores algorithm state
5418
5419 -- ALGLIB --
5420 Copyright 13.10.2010 by Bochkanov Sergey
5421*************************************************************************/
5422void minbcsetprecdefault(const minbcstate &state);
5423
5424
5425/*************************************************************************
5426Modification of the preconditioner: diagonal of approximate Hessian is
5427used.
5428
5429INPUT PARAMETERS:
5430 State - structure which stores algorithm state
5431 D - diagonal of the approximate Hessian, array[0..N-1],
5432 (if larger, only leading N elements are used).
5433
5434NOTE 1: D[i] should be positive. Exception will be thrown otherwise.
5435
5436NOTE 2: you should pass diagonal of approximate Hessian - NOT ITS INVERSE.
5437
5438 -- ALGLIB --
5439 Copyright 13.10.2010 by Bochkanov Sergey
5440*************************************************************************/
5441void minbcsetprecdiag(const minbcstate &state, const real_1d_array &d);
5442
5443
5444/*************************************************************************
5445Modification of the preconditioner: scale-based diagonal preconditioning.
5446
5447This preconditioning mode can be useful when you don't have approximate
5448diagonal of Hessian, but you know that your variables are badly scaled
5449(for example, one variable is in [1,10], and another in [1000,100000]),
5450and most part of the ill-conditioning comes from different scales of vars.
5451
5452In this case simple scale-based preconditioner, with H[i] = 1/(s[i]^2),
5453can greatly improve convergence.
5454
5455IMPORTANT: you should set scale of your variables with MinBCSetScale()
5456call (before or after MinBCSetPrecScale() call). Without knowledge of
5457the scale of your variables scale-based preconditioner will be just unit
5458matrix.
5459
5460INPUT PARAMETERS:
5461 State - structure which stores algorithm state
5462
5463 -- ALGLIB --
5464 Copyright 13.10.2010 by Bochkanov Sergey
5465*************************************************************************/
5466void minbcsetprecscale(const minbcstate &state);
5467
5468
5469/*************************************************************************
5470This function turns on/off reporting.
5471
5472INPUT PARAMETERS:
5473 State - structure which stores algorithm state
5474 NeedXRep- whether iteration reports are needed or not
5475
5476If NeedXRep is True, algorithm will call rep() callback function if it is
5477provided to MinBCOptimize().
5478
5479 -- ALGLIB --
5480 Copyright 28.11.2010 by Bochkanov Sergey
5481*************************************************************************/
5482void minbcsetxrep(const minbcstate &state, const bool needxrep);
5483
5484
5485/*************************************************************************
5486This function sets maximum step length
5487
5488INPUT PARAMETERS:
5489 State - structure which stores algorithm state
5490 StpMax - maximum step length, >=0. Set StpMax to 0.0, if you don't
5491 want to limit step length.
5492
5493Use this subroutine when you optimize target function which contains exp()
5494or other fast growing functions, and optimization algorithm makes too
5495large steps which lead to overflow. This function allows us to reject
5496steps that are too large (and therefore expose us to the possible
5497overflow) without actually calculating function value at the x+stp*d.
5498
5499 -- ALGLIB --
5500 Copyright 02.04.2010 by Bochkanov Sergey
5501*************************************************************************/
5502void minbcsetstpmax(const minbcstate &state, const double stpmax);
5503
5504
5505/*************************************************************************
5506This function provides reverse communication interface
5507Reverse communication interface is not documented or recommended to use.
5508See below for functions which provide better documented API
5509*************************************************************************/
5510bool minbciteration(const minbcstate &state);
5511
5512
5513/*************************************************************************
5514This family of functions is used to launch iterations of nonlinear optimizer
5515
5516These functions accept following parameters:
5517 state - algorithm state
5518 func - callback which calculates function (or merit function)
5519 value func at given point x
5520 grad - callback which calculates function (or merit function)
5521 value func and gradient grad at given point x
5522 rep - optional callback which is called after each iteration
5523 can be NULL
5524 ptr - optional pointer which is passed to func/grad/hess/jac/rep
5525 can be NULL
5526
5527NOTES:
5528
55291. This function has two different implementations: one which uses exact
5530 (analytical) user-supplied gradient, and one which uses function value
5531 only and numerically differentiates function in order to obtain
5532 gradient.
5533
5534 Depending on the specific function used to create optimizer object
5535 (either MinBCCreate() for analytical gradient or MinBCCreateF()
5536 for numerical differentiation) you should choose appropriate variant of
5537 MinBCOptimize() - one which accepts function AND gradient or one
5538 which accepts function ONLY.
5539
5540 Be careful to choose variant of MinBCOptimize() which corresponds to
5541 your optimization scheme! Table below lists different combinations of
5542 callback (function/gradient) passed to MinBCOptimize() and specific
5543 function used to create optimizer.
5544
5545
5546 | USER PASSED TO MinBCOptimize()
5547 CREATED WITH | function only | function and gradient
5548 ------------------------------------------------------------
5549 MinBCCreateF() | works FAILS
5550 MinBCCreate() | FAILS works
5551
5552 Here "FAILS" denotes inappropriate combinations of optimizer creation
5553 function and MinBCOptimize() version. Attempts to use such
5554 combination (for example, to create optimizer with MinBCCreateF()
5555 and to pass gradient information to MinBCOptimize()) will lead to
5556 exception being thrown. Either you did not pass gradient when it WAS
5557 needed or you passed gradient when it was NOT needed.
5558
5559 -- ALGLIB --
5560 Copyright 28.11.2010 by Bochkanov Sergey
5561
5562*************************************************************************/
5563void minbcoptimize(minbcstate &state,
5564 void (*func)(const real_1d_array &x, double &func, void *ptr),
5565 void (*rep)(const real_1d_array &x, double func, void *ptr) = NULL,
5566 void *ptr = NULL);
5567void minbcoptimize(minbcstate &state,
5568 void (*grad)(const real_1d_array &x, double &func, real_1d_array &grad, void *ptr),
5569 void (*rep)(const real_1d_array &x, double func, void *ptr) = NULL,
5570 void *ptr = NULL);
5571
5572
5573/*************************************************************************
5574BC results
5575
5576INPUT PARAMETERS:
5577 State - algorithm state
5578
5579OUTPUT PARAMETERS:
5580 X - array[0..N-1], solution
5581 Rep - optimization report. You should check Rep.TerminationType
5582 in order to distinguish successful termination from
5583 unsuccessful one:
5584 * -8 internal integrity control detected infinite or
5585 NAN values in function/gradient. Abnormal
5586 termination signalled.
5587 * -7 gradient verification failed.
5588 See MinBCSetGradientCheck() for more information.
5589 * -3 inconsistent constraints.
5590 * 1 relative function improvement is no more than EpsF.
5591 * 2 scaled step is no more than EpsX.
5592 * 4 scaled gradient norm is no more than EpsG.
5593 * 5 MaxIts steps were taken
5594 * 8 terminated by user who called minbcrequesttermination().
5595 X contains point which was "current accepted" when
5596 termination request was submitted.
5597 More information about fields of this structure can be
5598 found in the comments on MinBCReport datatype.
5599
5600 -- ALGLIB --
5601 Copyright 28.11.2010 by Bochkanov Sergey
5602*************************************************************************/
5603void minbcresults(const minbcstate &state, real_1d_array &x, minbcreport &rep);
5604
5605
5606/*************************************************************************
5607BC results
5608
5609Buffered implementation of MinBCResults() which uses pre-allocated buffer
5610to store X[]. If buffer size is too small, it resizes buffer. It is
5611intended to be used in the inner cycles of performance critical algorithms
5612where array reallocation penalty is too large to be ignored.
5613
5614 -- ALGLIB --
5615 Copyright 28.11.2010 by Bochkanov Sergey
5616*************************************************************************/
5617void minbcresultsbuf(const minbcstate &state, real_1d_array &x, minbcreport &rep);
5618
5619
5620/*************************************************************************
5621This subroutine restarts algorithm from new point.
5622All optimization parameters (including constraints) are left unchanged.
5623
5624This function allows to solve multiple optimization problems (which
5625must have same number of dimensions) without object reallocation penalty.
5626
5627INPUT PARAMETERS:
5628 State - structure previously allocated with MinBCCreate call.
5629 X - new starting point.
5630
5631 -- ALGLIB --
5632 Copyright 28.11.2010 by Bochkanov Sergey
5633*************************************************************************/
5634void minbcrestartfrom(const minbcstate &state, const real_1d_array &x);
5635
5636
5637/*************************************************************************
5638This subroutine submits request for termination of running optimizer. It
5639should be called from user-supplied callback when user decides that it is
5640time to "smoothly" terminate optimization process. As result, optimizer
5641stops at point which was "current accepted" when termination request was
5642submitted and returns error code 8 (successful termination).
5643
5644INPUT PARAMETERS:
5645 State - optimizer structure
5646
5647NOTE: after request for termination optimizer may perform several
5648 additional calls to user-supplied callbacks. It does NOT guarantee
5649 to stop immediately - it just guarantees that these additional calls
5650 will be discarded later.
5651
5652NOTE: calling this function on optimizer which is NOT running will have no
5653 effect.
5654
5655NOTE: multiple calls to this function are possible. First call is counted,
5656 subsequent calls are silently ignored.
5657
5658 -- ALGLIB --
5659 Copyright 08.10.2014 by Bochkanov Sergey
5660*************************************************************************/
5661void minbcrequesttermination(const minbcstate &state);
5662
5663
5664/*************************************************************************
5665This subroutine turns on verification of the user-supplied analytic
5666gradient:
5667* user calls this subroutine before optimization begins
5668* MinBCOptimize() is called
5669* prior to actual optimization, for each component of parameters being
5670 optimized X[i] algorithm performs following steps:
5671 * two trial steps are made to X[i]-TestStep*S[i] and X[i]+TestStep*S[i],
5672 where X[i] is i-th component of the initial point and S[i] is a scale
5673 of i-th parameter
5674 * if needed, steps are bounded with respect to constraints on X[]
5675 * F(X) is evaluated at these trial points
5676 * we perform one more evaluation in the middle point of the interval
5677 * we build cubic model using function values and derivatives at trial
5678 points and we compare its prediction with actual value in the middle
5679 point
5680 * in case difference between prediction and actual value is higher than
5681 some predetermined threshold, algorithm stops with completion code -7;
5682 Rep.VarIdx is set to index of the parameter with incorrect derivative.
5683* after verification is over, algorithm proceeds to the actual optimization.
5684
5685NOTE 1: verification needs N (parameters count) gradient evaluations. It
5686 is very costly and you should use it only for low dimensional
5687 problems, when you want to be sure that you've correctly
5688 calculated analytic derivatives. You should not use it in the
5689 production code (unless you want to check derivatives provided by
5690 some third party).
5691
5692NOTE 2: you should carefully choose TestStep. Value which is too large
5693 (so large that function behaviour is significantly non-cubic) will
5694 lead to false alarms. You may use different step for different
5695 parameters by means of setting scale with MinBCSetScale().
5696
5697NOTE 3: this function may lead to false positives. In case it reports that
5698 I-th derivative was calculated incorrectly, you may decrease test
5699 step and try one more time - maybe your function changes too
5700 sharply and your step is too large for such rapidly changing
5701 function.
5702
5703INPUT PARAMETERS:
5704 State - structure used to store algorithm state
5705 TestStep - verification step:
5706 * TestStep=0 turns verification off
5707 * TestStep>0 activates verification
5708
5709 -- ALGLIB --
5710 Copyright 15.06.2012 by Bochkanov Sergey
5711*************************************************************************/
5712void minbcsetgradientcheck(const minbcstate &state, const double teststep);
5713
5714/*************************************************************************
5715 NONSMOOTH NONCONVEX OPTIMIZATION
5716 SUBJECT TO BOX/LINEAR/NONLINEAR-NONSMOOTH CONSTRAINTS
5717
5718DESCRIPTION:
5719
5720The subroutine minimizes function F(x) of N arguments subject to any
5721combination of:
5722* bound constraints
5723* linear inequality constraints
5724* linear equality constraints
5725* nonlinear equality constraints Gi(x)=0
5726* nonlinear inequality constraints Hi(x)<=0
5727
5728IMPORTANT: see MinNSSetAlgoAGS for important information on performance
5729 restrictions of AGS solver.
5730
5731REQUIREMENTS:
5732* starting point X0 must be feasible or not too far away from the feasible
5733 set
5734* F(), G(), H() are continuous, locally Lipschitz and continuously (but
5735 not necessarily twice) differentiable in an open dense subset of R^N.
5736 Functions F(), G() and H() may be nonsmooth and non-convex.
5737 Informally speaking, it means that functions are composed of large
5738 differentiable "patches" with nonsmoothness having place only at the
5739 boundaries between these "patches".
5740 Most real-life nonsmooth functions satisfy these requirements. Say,
5741 anything which involves finite number of abs(), min() and max() is very
5742 likely to pass the test.
5743 Say, it is possible to optimize anything of the following:
5744 * f=abs(x0)+2*abs(x1)
5745 * f=max(x0,x1)
5746 * f=sin(max(x0,x1)+abs(x2))
5747* for nonlinearly constrained problems: F() must be bounded from below
5748 without nonlinear constraints (this requirement is due to the fact that,
5749 contrary to box and linear constraints, nonlinear ones require special
5750 handling).
5751* user must provide function value and gradient for F(), H(), G() at all
5752 points where function/gradient can be calculated. If optimizer requires
5753 value exactly at the boundary between "patches" (say, at x=0 for f=abs(x)),
5754 where gradient is not defined, user may resolve tie arbitrarily (in our
5755 case - return +1 or -1 at its discretion).
5756* NS solver supports numerical differentiation, i.e. it may differentiate
5757 your function for you, but it results in 2N increase of function
5758 evaluations. Not recommended unless you solve really small problems. See
5759 minnscreatef() for more information on this functionality.
5760
5761USAGE:
5762
57631. User initializes algorithm state with MinNSCreate() call and chooses
5764 what NLC solver to use. There is some solver which is used by default,
5765 with default settings, but you should NOT rely on default choice. It
5766 may change in future releases of ALGLIB without notice, and no one can
5767 guarantee that new solver will be able to solve your problem with
5768 default settings.
5769
5770 From the other side, if you choose solver explicitly, you can be pretty
5771 sure that it will work with new ALGLIB releases.
5772
5773 In the current release following solvers can be used:
5774 * AGS solver (activated with MinNSSetAlgoAGS() function)
5775
57762. User adds boundary and/or linear and/or nonlinear constraints by means
5777 of calling one of the following functions:
5778 a) MinNSSetBC() for boundary constraints
5779 b) MinNSSetLC() for linear constraints
5780 c) MinNSSetNLC() for nonlinear constraints
5781 You may combine (a), (b) and (c) in one optimization problem.
5782
57833. User sets scale of the variables with MinNSSetScale() function. It is
5784 VERY important to set scale of the variables, because nonlinearly
5785 constrained problems are hard to solve when variables are badly scaled.
5786
57874. User sets stopping conditions with MinNSSetCond().
5788
57895. Finally, user calls MinNSOptimize() function which takes algorithm
5790 state and pointer (delegate, etc) to callback function which calculates
5791 F/G/H.
5792
5793 6. User calls MinNSResults() to get solution
5794
5795 7. Optionally user may call MinNSRestartFrom() to solve another problem
5796 with same N but another starting point. MinNSRestartFrom() allows to
5797 reuse already initialized structure.
5798
5799
5800INPUT PARAMETERS:
5801 N - problem dimension, N>0:
5802 * if given, only leading N elements of X are used
5803 * if not given, automatically determined from size of X
5804 X - starting point, array[N]:
5805 * it is better to set X to a feasible point
5806 * but X can be infeasible, in which case algorithm will try
5807 to find feasible point first, using X as initial
5808 approximation.
5809
5810OUTPUT PARAMETERS:
5811 State - structure stores algorithm state
5812
5813NOTE: minnscreatef() function may be used if you do not have analytic
5814 gradient. This function creates solver which uses numerical
5815 differentiation with user-specified step.
5816
5817 -- ALGLIB --
5818 Copyright 18.05.2015 by Bochkanov Sergey
5819*************************************************************************/
5820void minnscreate(const ae_int_t n, const real_1d_array &x, minnsstate &state);
5821void minnscreate(const real_1d_array &x, minnsstate &state);
5822
5823
5824/*************************************************************************
5825Version of minnscreate() which uses numerical differentiation. I.e., you
5826do not have to calculate derivatives yourself. However, this version needs
58272N times more function evaluations.
5828
58292-point differentiation formula is used, because more precise 4-point
5830formula is unstable when used on non-smooth functions.
5831
5832INPUT PARAMETERS:
5833 N - problem dimension, N>0:
5834 * if given, only leading N elements of X are used
5835 * if not given, automatically determined from size of X
5836 X - starting point, array[N]:
5837 * it is better to set X to a feasible point
5838 * but X can be infeasible, in which case algorithm will try
5839 to find feasible point first, using X as initial
5840 approximation.
5841 DiffStep- differentiation step, DiffStep>0. Algorithm performs
5842 numerical differentiation with step for I-th variable
5843 being equal to DiffStep*S[I] (here S[] is a scale vector,
5844 set by minnssetscale() function).
5845 Do not use too small steps, because it may lead to
5846 catastrophic cancellation during intermediate calculations.
5847
5848OUTPUT PARAMETERS:
5849 State - structure stores algorithm state
5850
5851 -- ALGLIB --
5852 Copyright 18.05.2015 by Bochkanov Sergey
5853*************************************************************************/
5854void minnscreatef(const ae_int_t n, const real_1d_array &x, const double diffstep, minnsstate &state);
5855void minnscreatef(const real_1d_array &x, const double diffstep, minnsstate &state);
5856
5857
5858/*************************************************************************
5859This function sets boundary constraints.
5860
5861Boundary constraints are inactive by default (after initial creation).
5862They are preserved after algorithm restart with minnsrestartfrom().
5863
5864INPUT PARAMETERS:
5865 State - structure stores algorithm state
5866 BndL - lower bounds, array[N].
5867 If some (all) variables are unbounded, you may specify
5868 very small number or -INF.
5869 BndU - upper bounds, array[N].
5870 If some (all) variables are unbounded, you may specify
5871 very large number or +INF.
5872
5873NOTE 1: it is possible to specify BndL[i]=BndU[i]. In this case I-th
5874variable will be "frozen" at X[i]=BndL[i]=BndU[i].
5875
5876NOTE 2: AGS solver has following useful properties:
5877* bound constraints are always satisfied exactly
5878* function is evaluated only INSIDE area specified by bound constraints,
5879 even when numerical differentiation is used (algorithm adjusts nodes
5880 according to boundary constraints)
5881
5882 -- ALGLIB --
5883 Copyright 18.05.2015 by Bochkanov Sergey
5884*************************************************************************/
5885void minnssetbc(const minnsstate &state, const real_1d_array &bndl, const real_1d_array &bndu);
5886
5887
5888/*************************************************************************
5889This function sets linear constraints.
5890
5891Linear constraints are inactive by default (after initial creation).
5892They are preserved after algorithm restart with minnsrestartfrom().
5893
5894INPUT PARAMETERS:
5895 State - structure previously allocated with minnscreate() call.
5896 C - linear constraints, array[K,N+1].
5897 Each row of C represents one constraint, either equality
5898 or inequality (see below):
5899 * first N elements correspond to coefficients,
5900 * last element corresponds to the right part.
5901 All elements of C (including right part) must be finite.
5902 CT - type of constraints, array[K]:
5903 * if CT[i]>0, then I-th constraint is C[i,*]*x >= C[i,n+1]
5904 * if CT[i]=0, then I-th constraint is C[i,*]*x = C[i,n+1]
5905 * if CT[i]<0, then I-th constraint is C[i,*]*x <= C[i,n+1]
5906 K - number of equality/inequality constraints, K>=0:
5907 * if given, only leading K elements of C/CT are used
5908 * if not given, automatically determined from sizes of C/CT
5909
5910NOTE: linear (non-bound) constraints are satisfied only approximately:
5911
5912* there always exists some minor violation (about current sampling radius
5913 in magnitude during optimization, about EpsX in the solution) due to use
5914 of penalty method to handle constraints.
5915* numerical differentiation, if used, may lead to function evaluations
5916 outside of the feasible area, because algorithm does NOT change
5917 numerical differentiation formula according to linear constraints.
5918
5919If you want constraints to be satisfied exactly, try to reformulate your
5920problem in such manner that all constraints will become boundary ones
5921(this kind of constraints is always satisfied exactly, both in the final
5922solution and in all intermediate points).
5923
5924 -- ALGLIB --
5925 Copyright 18.05.2015 by Bochkanov Sergey
5926*************************************************************************/
5927void minnssetlc(const minnsstate &state, const real_2d_array &c, const integer_1d_array &ct, const ae_int_t k);
5928void minnssetlc(const minnsstate &state, const real_2d_array &c, const integer_1d_array &ct);
5929
5930
5931/*************************************************************************
5932This function sets nonlinear constraints.
5933
5934In fact, this function sets NUMBER of nonlinear constraints. Constraints
5935itself (constraint functions) are passed to minnsoptimize() method. This
5936method requires user-defined vector function F[] and its Jacobian J[],
5937where:
5938* first component of F[] and first row of Jacobian J[] correspond to
5939 function being minimized
5940* next NLEC components of F[] (and rows of J) correspond to nonlinear
5941 equality constraints G_i(x)=0
5942* next NLIC components of F[] (and rows of J) correspond to nonlinear
5943 inequality constraints H_i(x)<=0
5944
5945NOTE: you may combine nonlinear constraints with linear/boundary ones. If
5946 your problem has mixed constraints, you may explicitly specify some
5947 of them as linear ones. It may help optimizer to handle them more
5948 efficiently.
5949
5950INPUT PARAMETERS:
5951 State - structure previously allocated with minnscreate() call.
5952 NLEC - number of Non-Linear Equality Constraints (NLEC), >=0
5953 NLIC - number of Non-Linear Inequality Constraints (NLIC), >=0
5954
5955NOTE 1: nonlinear constraints are satisfied only approximately! It is
5956 possible that algorithm will evaluate function outside of
5957 the feasible area!
5958
5959NOTE 2: algorithm scales variables according to scale specified by
5960 minnssetscale() function, so it can handle problems with badly
5961 scaled variables (as long as we KNOW their scales).
5962
5963 However, there is no way to automatically scale nonlinear
5964 constraints Gi(x) and Hi(x). Inappropriate scaling of Gi/Hi may
5965 ruin convergence. Solving problem with constraint "1000*G0(x)=0"
5966 is NOT same as solving it with constraint "0.001*G0(x)=0".
5967
5968 It means that YOU are the one who is responsible for correct
5969 scaling of nonlinear constraints Gi(x) and Hi(x). We recommend you
5970 to scale nonlinear constraints in such way that I-th component of
5971 dG/dX (or dH/dx) has approximately unit magnitude (for problems
5972 with unit scale) or has magnitude approximately equal to 1/S[i]
5973 (where S is a scale set by minnssetscale() function).
5974
5975NOTE 3: nonlinear constraints are always hard to handle, no matter what
5976 algorithm you try to use. Even basic box/linear constraints modify
5977 function curvature by adding valleys and ridges. However,
5978 nonlinear constraints add valleys which are very hard to follow
5979 due to their "curved" nature.
5980
5981 It means that optimization with single nonlinear constraint may be
5982 significantly slower than optimization with multiple linear ones.
5983 It is normal situation, and we recommend you to carefully choose
5984 Rho parameter of minnssetalgoags(), because too large value may
5985 slow down convergence.
5986
5987
5988 -- ALGLIB --
5989 Copyright 18.05.2015 by Bochkanov Sergey
5990*************************************************************************/
5991void minnssetnlc(const minnsstate &state, const ae_int_t nlec, const ae_int_t nlic);
5992
5993
5994/*************************************************************************
5995This function sets stopping conditions for iterations of optimizer.
5996
5997INPUT PARAMETERS:
5998 State - structure which stores algorithm state
5999 EpsX - >=0
6000 The AGS solver finishes its work if on k+1-th iteration
6001 sampling radius decreases below EpsX.
6002 MaxIts - maximum number of iterations. If MaxIts=0, the number of
6003 iterations is unlimited.
6004
6005Passing EpsX=0 and MaxIts=0 (simultaneously) will lead to automatic
6006stopping criterion selection. We do not recommend you to rely on default
6007choice in production code.
6008
6009 -- ALGLIB --
6010 Copyright 18.05.2015 by Bochkanov Sergey
6011*************************************************************************/
6012void minnssetcond(const minnsstate &state, const double epsx, const ae_int_t maxits);
6013
6014
6015/*************************************************************************
6016This function sets scaling coefficients for NLC optimizer.
6017
6018ALGLIB optimizers use scaling matrices to test stopping conditions (step
6019size and gradient are scaled before comparison with tolerances). Scale of
6020the I-th variable is a translation invariant measure of:
6021a) "how large" the variable is
6022b) how large the step should be to make significant changes in the function
6023
6024Scaling is also used by finite difference variant of the optimizer - step
6025along I-th axis is equal to DiffStep*S[I].
6026
6027INPUT PARAMETERS:
6028 State - structure stores algorithm state
6029 S - array[N], non-zero scaling coefficients
6030 S[i] may be negative, sign doesn't matter.
6031
6032 -- ALGLIB --
6033 Copyright 18.05.2015 by Bochkanov Sergey
6034*************************************************************************/
6035void minnssetscale(const minnsstate &state, const real_1d_array &s);
6036
6037
6038/*************************************************************************
6039This function tells MinNS unit to use AGS (adaptive gradient sampling)
6040algorithm for nonsmooth constrained optimization. This algorithm is a
6041slight modification of one described in "An Adaptive Gradient Sampling
6042Algorithm for Nonsmooth Optimization" by Frank E. Curtis and Xiaocun Que.
6043
6044This optimizer has following benefits and drawbacks:
6045+ robustness; it can be used with nonsmooth and nonconvex functions.
6046+ relatively easy tuning; most of the metaparameters are easy to select.
6047- it has convergence of steepest descent, slower than CG/LBFGS.
6048- each iteration involves evaluation of ~2N gradient values and solution
6049 of 2Nx2N quadratic programming problem, which limits applicability of
6050 algorithm by small-scale problems (up to 50-100).
6051
6052IMPORTANT: this algorithm has convergence guarantees, i.e. it will
6053 steadily move towards some stationary point of the function.
6054
6055 However, "stationary point" does not always mean "solution".
6056 Nonsmooth problems often have "flat spots", i.e. areas where
6057 function does not change at all. Such "flat spots" are stationary
6058 points by definition, and algorithm may be caught here.
6059
6060 Nonsmooth CONVEX tasks are not prone to this problem. Say, if
6061 your function has form f()=MAX(f0,f1,...), and f_i are convex,
6062 then f() is convex too and you have guaranteed convergence to
6063 solution.
6064
6065INPUT PARAMETERS:
6066 State - structure which stores algorithm state
6067 Radius - initial sampling radius, >=0.
6068
6069 Internally multiplied by vector of per-variable scales
6070 specified by minnssetscale().
6071
6072 You should select relatively large sampling radius, roughly
6073 proportional to scaled length of the first steps of the
6074 algorithm. Something close to 0.1 in magnitude should be
6075 good for most problems.
6076
6077 AGS solver can automatically decrease radius, so too large
6078 radius is not a problem (assuming that you won't choose
6079 so large radius that algorithm will sample function in
6080 too far away points, where gradient value is irrelevant).
6081
6082 Too small radius won't cause algorithm to fail, but it may
6083 slow down algorithm (it may have to perform too short
6084 steps).
6085 Penalty - penalty coefficient for nonlinear constraints:
6086 * for problem with nonlinear constraints should be some
6087 problem-specific positive value, large enough that
6088 penalty term changes shape of the function.
6089 Starting from some problem-specific value penalty
6090 coefficient becomes large enough to exactly enforce
6091 nonlinear constraints; larger values do not improve
6092 precision.
6093 Increasing it too much may slow down convergence, so you
6094 should choose it carefully.
6095 * can be zero for problems WITHOUT nonlinear constraints
6096 (i.e. for unconstrained ones or ones with just box or
6097 linear constraints)
6098 * if you specify zero value for problem with at least one
6099 nonlinear constraint, algorithm will terminate with
6100 error code -1.
6101
6102ALGORITHM OUTLINE
6103
6104The very basic outline of unconstrained AGS algorithm is given below:
6105
61060. If sampling radius is below EpsX or we performed more than MaxIts
6107 iterations - STOP.
61081. sample O(N) gradient values at random locations around current point;
6109 informally speaking, this sample is an implicit piecewise linear model
6110 of the function, although algorithm formulation does not mention that
6111 explicitly
61122. solve quadratic programming problem in order to find descent direction
61133. if QP solver tells us that we are near solution, decrease sampling
6114 radius and move to (0)
61154. perform backtracking line search
61165. after moving to new point, goto (0)
6117
6118As for the constraints:
6119* box constraints are handled exactly by modification of the function
6120 being minimized
6121* linear/nonlinear constraints are handled by adding L1 penalty. Because
6122 our solver can handle nonsmoothness, we can use L1 penalty function,
6123 which is an exact one (i.e. exact solution is returned under such
6124 penalty).
6125* penalty coefficient for linear constraints is chosen automatically;
6126 however, penalty coefficient for nonlinear constraints must be specified
6127 by user.
6128
6129 -- ALGLIB --
6130 Copyright 18.05.2015 by Bochkanov Sergey
6131*************************************************************************/
6132void minnssetalgoags(const minnsstate &state, const double radius, const double penalty);
6133
6134
6135/*************************************************************************
6136This function turns on/off reporting.
6137
6138INPUT PARAMETERS:
6139 State - structure which stores algorithm state
6140 NeedXRep- whether iteration reports are needed or not
6141
6142If NeedXRep is True, algorithm will call rep() callback function if it is
6143provided to minnsoptimize().
6144
6145 -- ALGLIB --
6146 Copyright 28.11.2010 by Bochkanov Sergey
6147*************************************************************************/
6148void minnssetxrep(const minnsstate &state, const bool needxrep);
6149
6150
6151/*************************************************************************
6152This subroutine submits request for termination of running optimizer. It
6153should be called from user-supplied callback when user decides that it is
6154time to "smoothly" terminate optimization process. As a result, the optimizer
6155stops at point which was "current accepted" when termination request was
6156submitted and returns error code 8 (successful termination).
6157
6158INPUT PARAMETERS:
6159 State - optimizer structure
6160
6161NOTE: after request for termination optimizer may perform several
6162 additional calls to user-supplied callbacks. It does NOT guarantee
6163 to stop immediately - it just guarantees that these additional calls
6164 will be discarded later.
6165
6166NOTE: calling this function on optimizer which is NOT running will have no
6167 effect.
6168
6169NOTE: multiple calls to this function are possible. First call is counted,
6170 subsequent calls are silently ignored.
6171
6172 -- ALGLIB --
6173 Copyright 18.05.2015 by Bochkanov Sergey
6174*************************************************************************/
6175void minnsrequesttermination(const minnsstate &state);
6176
6177
6178/*************************************************************************
6179This function provides reverse communication interface
6180Reverse communication interface is not documented or recommended to use.
6181See below for functions which provide better documented API
6182*************************************************************************/
6183bool minnsiteration(const minnsstate &state);
6184
6185
6186/*************************************************************************
6187This family of functions is used to launch iterations of nonlinear optimizer
6188
6189These functions accept following parameters:
6190 state - algorithm state
6191 fvec - callback which calculates function vector fi[]
6192 at given point x
6193 jac - callback which calculates function vector fi[]
6194 and Jacobian jac at given point x
6195 rep - optional callback which is called after each iteration
6196 can be NULL
6197 ptr - optional pointer which is passed to func/grad/hess/jac/rep
6198 can be NULL
6199
6200
6201NOTES:
6202
62031. This function has two different implementations: one which uses exact
6204 (analytical) user-supplied Jacobian, and one which uses only function
6205 vector and numerically differentiates function in order to obtain
6206 gradient.
6207
6208 Depending on the specific function used to create optimizer object
6209 you should choose appropriate variant of minnsoptimize() - one which
6210 accepts function AND Jacobian or one which accepts ONLY function.
6211
6212 Be careful to choose variant of minnsoptimize() which corresponds to
6213 your optimization scheme! Table below lists different combinations of
6214 callback (function/gradient) passed to minnsoptimize() and specific
6215 function used to create optimizer.
6216
6217
6218 | USER PASSED TO minnsoptimize()
6219 CREATED WITH | function only | function and gradient
6220 ------------------------------------------------------------
6221 minnscreatef() | works FAILS
6222 minnscreate() | FAILS works
6223
6224 Here "FAILS" denotes inappropriate combinations of optimizer creation
6225 function and minnsoptimize() version. Attempts to use such
6226 combination will lead to exception. Either you did not pass gradient
6227 when it WAS needed or you passed gradient when it was NOT needed.
6228
6229 -- ALGLIB --
6230 Copyright 18.05.2015 by Bochkanov Sergey
6231
6232*************************************************************************/
6233void minnsoptimize(minnsstate &state,
6234 void (*fvec)(const real_1d_array &x, real_1d_array &fi, void *ptr),
6235 void (*rep)(const real_1d_array &x, double func, void *ptr) = NULL,
6236 void *ptr = NULL);
6237void minnsoptimize(minnsstate &state,
6238 void (*jac)(const real_1d_array &x, real_1d_array &fi, real_2d_array &jac, void *ptr),
6239 void (*rep)(const real_1d_array &x, double func, void *ptr) = NULL,
6240 void *ptr = NULL);
6241
6242
6243/*************************************************************************
6244MinNS results
6245
6246INPUT PARAMETERS:
6247 State - algorithm state
6248
6249OUTPUT PARAMETERS:
6250 X - array[0..N-1], solution
6251 Rep - optimization report. You should check Rep.TerminationType
6252 in order to distinguish successful termination from
6253 unsuccessful one:
6254 * -8 internal integrity control detected infinite or
6255 NAN values in function/gradient. Abnormal
6256 termination signalled.
6257 * -3 box constraints are inconsistent
6258 * -1 inconsistent parameters were passed:
6259 * penalty parameter for minnssetalgoags() is zero,
6260 but we have nonlinear constraints set by minnssetnlc()
6261 * 2 sampling radius decreased below epsx
6262 * 7 stopping conditions are too stringent,
6263 further improvement is impossible,
6264 X contains best point found so far.
6265 * 8 User requested termination via minnsrequesttermination()
6266
6267 -- ALGLIB --
6268 Copyright 18.05.2015 by Bochkanov Sergey
6269*************************************************************************/
6270void minnsresults(const minnsstate &state, real_1d_array &x, minnsreport &rep);
6271
6272
6273/*************************************************************************
6274
6275Buffered implementation of minnsresults() which uses pre-allocated buffer
6276to store X[]. If buffer size is too small, it resizes buffer. It is
6277intended to be used in the inner cycles of performance critical algorithms
6278where array reallocation penalty is too large to be ignored.
6279
6280 -- ALGLIB --
6281 Copyright 18.05.2015 by Bochkanov Sergey
6282*************************************************************************/
6283void minnsresultsbuf(const minnsstate &state, real_1d_array &x, minnsreport &rep);
6284
6285
6286/*************************************************************************
6287This subroutine restarts algorithm from new point.
6288All optimization parameters (including constraints) are left unchanged.
6289
6290This function allows to solve multiple optimization problems (which
6291must have same number of dimensions) without object reallocation penalty.
6292
6293INPUT PARAMETERS:
6294 State - structure previously allocated with minnscreate() call.
6295 X - new starting point.
6296
6297 -- ALGLIB --
6298 Copyright 18.05.2015 by Bochkanov Sergey
6299*************************************************************************/
6300void minnsrestartfrom(const minnsstate &state, const real_1d_array &x);
6301
6302/*************************************************************************
6303Obsolete function, use MinLBFGSSetPrecDefault() instead.
6304
6305 -- ALGLIB --
6306 Copyright 13.10.2010 by Bochkanov Sergey
6307*************************************************************************/
6308void minlbfgssetdefaultpreconditioner(const minlbfgsstate &state);
6309
6310
6311/*************************************************************************
6312Obsolete function, use MinLBFGSSetCholeskyPreconditioner() instead.
6313
6314 -- ALGLIB --
6315 Copyright 13.10.2010 by Bochkanov Sergey
6316*************************************************************************/
6317void minlbfgssetcholeskypreconditioner(const minlbfgsstate &state, const real_2d_array &p, const bool isupper);
6318
6319
6320/*************************************************************************
6321This is obsolete function which was used by previous version of the BLEIC
6322optimizer. It does nothing in the current version of BLEIC.
6323
6324 -- ALGLIB --
6325 Copyright 28.11.2010 by Bochkanov Sergey
6326*************************************************************************/
6327void minbleicsetbarrierwidth(const minbleicstate &state, const double mu);
6328
6329
6330/*************************************************************************
6331This is obsolete function which was used by previous version of the BLEIC
6332optimizer. It does nothing in the current version of BLEIC.
6333
6334 -- ALGLIB --
6335 Copyright 28.11.2010 by Bochkanov Sergey
6336*************************************************************************/
6337void minbleicsetbarrierdecay(const minbleicstate &state, const double mudecay);
6338
6339
6340/*************************************************************************
6341Obsolete optimization algorithm.
6342Was replaced by MinBLEIC subpackage.
6343
6344 -- ALGLIB --
6345 Copyright 25.03.2010 by Bochkanov Sergey
6346*************************************************************************/
6347void minasacreate(const ae_int_t n, const real_1d_array &x, const real_1d_array &bndl, const real_1d_array &bndu, minasastate &state);
6348void minasacreate(const real_1d_array &x, const real_1d_array &bndl, const real_1d_array &bndu, minasastate &state);
6349
6350
6351/*************************************************************************
6352Obsolete optimization algorithm.
6353Was replaced by MinBLEIC subpackage.
6354
6355 -- ALGLIB --
6356 Copyright 02.04.2010 by Bochkanov Sergey
6357*************************************************************************/
6358void minasasetcond(const minasastate &state, const double epsg, const double epsf, const double epsx, const ae_int_t maxits);
6359
6360
6361/*************************************************************************
6362Obsolete optimization algorithm.
6363Was replaced by MinBLEIC subpackage.
6364
6365 -- ALGLIB --
6366 Copyright 02.04.2010 by Bochkanov Sergey
6367*************************************************************************/
6368void minasasetxrep(const minasastate &state, const bool needxrep);
6369
6370
6371/*************************************************************************
6372Obsolete optimization algorithm.
6373Was replaced by MinBLEIC subpackage.
6374
6375 -- ALGLIB --
6376 Copyright 02.04.2010 by Bochkanov Sergey
6377*************************************************************************/
6378void minasasetalgorithm(const minasastate &state, const ae_int_t algotype);
6379
6380
6381/*************************************************************************
6382Obsolete optimization algorithm.
6383Was replaced by MinBLEIC subpackage.
6384
6385 -- ALGLIB --
6386 Copyright 02.04.2010 by Bochkanov Sergey
6387*************************************************************************/
6388void minasasetstpmax(const minasastate &state, const double stpmax);
6389
6390
6391/*************************************************************************
6392This function provides reverse communication interface
6393Reverse communication interface is not documented or recommended to use.
6394See below for functions which provide better documented API
6395*************************************************************************/
6396bool minasaiteration(const minasastate &state);
6397
6398
6399/*************************************************************************
6400This family of functions is used to launch iterations of nonlinear optimizer
6401
6402These functions accept following parameters:
6403 state - algorithm state
6404 grad - callback which calculates function (or merit function)
6405 value func and gradient grad at given point x
6406 rep - optional callback which is called after each iteration
6407 can be NULL
6408 ptr - optional pointer which is passed to func/grad/hess/jac/rep
6409 can be NULL
6410
6411
6412 -- ALGLIB --
6413 Copyright 20.03.2009 by Bochkanov Sergey
6414
6415*************************************************************************/
6416void minasaoptimize(minasastate &state,
6417 void (*grad)(const real_1d_array &x, double &func, real_1d_array &grad, void *ptr),
6418 void (*rep)(const real_1d_array &x, double func, void *ptr) = NULL,
6419 void *ptr = NULL);
6420
6421
6422/*************************************************************************
6423Obsolete optimization algorithm.
6424Was replaced by MinBLEIC subpackage.
6425
6426 -- ALGLIB --
6427 Copyright 20.03.2009 by Bochkanov Sergey
6428*************************************************************************/
6429void minasaresults(const minasastate &state, real_1d_array &x, minasareport &rep);
6430
6431
6432/*************************************************************************
6433Obsolete optimization algorithm.
6434Was replaced by MinBLEIC subpackage.
6435
6436 -- ALGLIB --
6437 Copyright 20.03.2009 by Bochkanov Sergey
6438*************************************************************************/
6439void minasaresultsbuf(const minasastate &state, real_1d_array &x, minasareport &rep);
6440
6441
6442/*************************************************************************
6443Obsolete optimization algorithm.
6444Was replaced by MinBLEIC subpackage.
6445
6446 -- ALGLIB --
6447 Copyright 30.07.2010 by Bochkanov Sergey
6448*************************************************************************/
6449void minasarestartfrom(const minasastate &state, const real_1d_array &x, const real_1d_array &bndl, const real_1d_array &bndu);
6450
6451/*************************************************************************
6452 IMPROVED LEVENBERG-MARQUARDT METHOD FOR
6453 NON-LINEAR LEAST SQUARES OPTIMIZATION
6454
6455DESCRIPTION:
6456This function is used to find minimum of function which is represented as
6457sum of squares:
6458 F(x) = f[0]^2(x[0],...,x[n-1]) + ... + f[m-1]^2(x[0],...,x[n-1])
6459using value of function vector f[] and Jacobian of f[].
6460
6461
6462REQUIREMENTS:
6463This algorithm will request following information during its operation:
6464
6465* function vector f[] at given point X
6466* function vector f[] and Jacobian of f[] (simultaneously) at given point
6467
6468There are several overloaded versions of MinLMOptimize() function which
6469correspond to different LM-like optimization algorithms provided by this
6470unit. You should choose version which accepts fvec() and jac() callbacks.
6471First one is used to calculate f[] at given point, second one calculates
6472f[] and Jacobian df[i]/dx[j].
6473
6474You can try to initialize MinLMState structure with VJ function and then
6475use incorrect version of MinLMOptimize() (for example, version which
6476works with general form function and does not provide Jacobian), but it
6477will lead to exception being thrown after first attempt to calculate
6478Jacobian.
6479
6480
6481USAGE:
64821. User initializes algorithm state with MinLMCreateVJ() call
64832. User tunes solver parameters with MinLMSetCond(), MinLMSetStpMax() and
6484 other functions
64853. User calls MinLMOptimize() function which takes algorithm state and
6486 callback functions.
64874. User calls MinLMResults() to get solution
64885. Optionally, user may call MinLMRestartFrom() to solve another problem
6489 with same N/M but another starting point and/or another function.
6490 MinLMRestartFrom() allows to reuse already initialized structure.
6491
6492
6493INPUT PARAMETERS:
6494 N - dimension, N>1
6495 * if given, only leading N elements of X are used
6496 * if not given, automatically determined from size of X
6497 M - number of functions f[i]
6498 X - initial solution, array[0..N-1]
6499
6500OUTPUT PARAMETERS:
6501 State - structure which stores algorithm state
6502
6503NOTES:
65041. you may tune stopping conditions with MinLMSetCond() function
65052. if target function contains exp() or other fast growing functions, and
6506 optimization algorithm makes too large steps which leads to overflow,
6507 use MinLMSetStpMax() function to bound algorithm's steps.
6508
6509 -- ALGLIB --
6510 Copyright 30.03.2009 by Bochkanov Sergey
6511*************************************************************************/
6512void minlmcreatevj(const ae_int_t n, const ae_int_t m, const real_1d_array &x, minlmstate &state);
6513void minlmcreatevj(const ae_int_t m, const real_1d_array &x, minlmstate &state);
6514
6515
6516/*************************************************************************
6517 IMPROVED LEVENBERG-MARQUARDT METHOD FOR
6518 NON-LINEAR LEAST SQUARES OPTIMIZATION
6519
6520DESCRIPTION:
6521This function is used to find minimum of function which is represented as
6522sum of squares:
6523 F(x) = f[0]^2(x[0],...,x[n-1]) + ... + f[m-1]^2(x[0],...,x[n-1])
6524using value of function vector f[] only. Finite differences are used to
6525calculate Jacobian.
6526
6527
6528REQUIREMENTS:
6529This algorithm will request following information during its operation:
6530* function vector f[] at given point X
6531
6532There are several overloaded versions of MinLMOptimize() function which
6533correspond to different LM-like optimization algorithms provided by this
6534unit. You should choose version which accepts fvec() callback.
6535
6536You can try to initialize MinLMState structure with VJ function and then
6537use incorrect version of MinLMOptimize() (for example, version which
6538works with general form function and does not accept function vector), but
6539it will lead to exception being thrown after first attempt to calculate
6540Jacobian.
6541
6542
6543USAGE:
65441. User initializes algorithm state with MinLMCreateV() call
65452. User tunes solver parameters with MinLMSetCond(), MinLMSetStpMax() and
6546 other functions
65473. User calls MinLMOptimize() function which takes algorithm state and
6548 callback functions.
65494. User calls MinLMResults() to get solution
65505. Optionally, user may call MinLMRestartFrom() to solve another problem
6551 with same N/M but another starting point and/or another function.
6552 MinLMRestartFrom() allows to reuse already initialized structure.
6553
6554
6555INPUT PARAMETERS:
6556 N - dimension, N>1
6557 * if given, only leading N elements of X are used
6558 * if not given, automatically determined from size of X
6559 M - number of functions f[i]
6560 X - initial solution, array[0..N-1]
6561 DiffStep- differentiation step, >0
6562
6563OUTPUT PARAMETERS:
6564 State - structure which stores algorithm state
6565
6566See also MinLMIteration, MinLMResults.
6567
6568NOTES:
65691. you may tune stopping conditions with MinLMSetCond() function
65702. if target function contains exp() or other fast growing functions, and
6571 optimization algorithm makes too large steps which leads to overflow,
6572 use MinLMSetStpMax() function to bound algorithm's steps.
6573
6574 -- ALGLIB --
6575 Copyright 30.03.2009 by Bochkanov Sergey
6576*************************************************************************/
6577void minlmcreatev(const ae_int_t n, const ae_int_t m, const real_1d_array &x, const double diffstep, minlmstate &state);
6578void minlmcreatev(const ae_int_t m, const real_1d_array &x, const double diffstep, minlmstate &state);
6579
6580
6581/*************************************************************************
6582 LEVENBERG-MARQUARDT-LIKE METHOD FOR NON-LINEAR OPTIMIZATION
6583
6584DESCRIPTION:
6585This function is used to find minimum of general form (not "sum-of-
6586-squares") function
6587 F = F(x[0], ..., x[n-1])
6588using its gradient and Hessian. Levenberg-Marquardt modification with
6589L-BFGS pre-optimization and internal pre-conditioned L-BFGS optimization
6590after each Levenberg-Marquardt step is used.
6591
6592
6593REQUIREMENTS:
6594This algorithm will request following information during its operation:
6595
6596* function value F at given point X
6597* F and gradient G (simultaneously) at given point X
6598* F, G and Hessian H (simultaneously) at given point X
6599
6600There are several overloaded versions of MinLMOptimize() function which
6601correspond to different LM-like optimization algorithms provided by this
6602unit. You should choose version which accepts func(), grad() and hess()
6603function pointers. First pointer is used to calculate F at given point,
6604second one calculates F(x) and grad F(x), third one calculates F(x),
6605grad F(x), hess F(x).
6606
6607You can try to initialize MinLMState structure with FGH-function and then
6608use incorrect version of MinLMOptimize() (for example, version which does
6609not provide Hessian matrix), but it will lead to exception being thrown
6610after first attempt to calculate Hessian.
6611
6612
6613USAGE:
66141. User initializes algorithm state with MinLMCreateFGH() call
66152. User tunes solver parameters with MinLMSetCond(), MinLMSetStpMax() and
6616 other functions
66173. User calls MinLMOptimize() function which takes algorithm state and
6618 pointers (delegates, etc.) to callback functions.
66194. User calls MinLMResults() to get solution
66205. Optionally, user may call MinLMRestartFrom() to solve another problem
6621 with same N but another starting point and/or another function.
6622 MinLMRestartFrom() allows to reuse already initialized structure.
6623
6624
6625INPUT PARAMETERS:
6626 N - dimension, N>1
6627 * if given, only leading N elements of X are used
6628 * if not given, automatically determined from size of X
6629 X - initial solution, array[0..N-1]
6630
6631OUTPUT PARAMETERS:
6632 State - structure which stores algorithm state
6633
6634NOTES:
66351. you may tune stopping conditions with MinLMSetCond() function
66362. if target function contains exp() or other fast growing functions, and
6637 optimization algorithm makes too large steps which leads to overflow,
6638 use MinLMSetStpMax() function to bound algorithm's steps.
6639
6640 -- ALGLIB --
6641 Copyright 30.03.2009 by Bochkanov Sergey
6642*************************************************************************/
6643void minlmcreatefgh(const ae_int_t n, const real_1d_array &x, minlmstate &state);
6644void minlmcreatefgh(const real_1d_array &x, minlmstate &state);
6645
6646
6647/*************************************************************************
6648This function sets stopping conditions for Levenberg-Marquardt optimization
6649algorithm.
6650
6651INPUT PARAMETERS:
6652 State - structure which stores algorithm state
6653 EpsX - >=0
6654 The subroutine finishes its work if on k+1-th iteration
6655 the condition |v|<=EpsX is fulfilled, where:
6656                 * |.| means Euclidean norm
6657 * v - scaled step vector, v[i]=dx[i]/s[i]
6658                 * dx - step vector, dx=X(k+1)-X(k)
6659 * s - scaling coefficients set by MinLMSetScale()
6660 Recommended values: 1E-9 ... 1E-12.
6661 MaxIts - maximum number of iterations. If MaxIts=0, the number of
6662 iterations is unlimited. Only Levenberg-Marquardt
6663 iterations are counted (L-BFGS/CG iterations are NOT
6664 counted because their cost is very low compared to that of
6665 LM).
6666
6667Passing EpsX=0 and MaxIts=0 (simultaneously) will lead to automatic
6668stopping criterion selection (small EpsX).
6669
6670NOTE: it is not recommended to set large EpsX (say, 0.001). Because LM is
6671 a second-order method, it performs very precise steps anyway.
6672
6673 -- ALGLIB --
6674 Copyright 02.04.2010 by Bochkanov Sergey
6675*************************************************************************/
6676void minlmsetcond(const minlmstate &state, const double epsx, const ae_int_t maxits);
6677
6678
6679/*************************************************************************
6680This function turns on/off reporting.
6681
6682INPUT PARAMETERS:
6683 State - structure which stores algorithm state
6684 NeedXRep- whether iteration reports are needed or not
6685
6686If NeedXRep is True, algorithm will call rep() callback function if it is
6687provided to MinLMOptimize(). Both Levenberg-Marquardt and internal L-BFGS
6688iterations are reported.
6689
6690 -- ALGLIB --
6691 Copyright 02.04.2010 by Bochkanov Sergey
6692*************************************************************************/
6693void minlmsetxrep(const minlmstate &state, const bool needxrep);
6694
6695
6696/*************************************************************************
6697This function sets maximum step length
6698
6699INPUT PARAMETERS:
6700 State - structure which stores algorithm state
6701 StpMax - maximum step length, >=0. Set StpMax to 0.0, if you don't
6702 want to limit step length.
6703
6704Use this subroutine when you optimize target function which contains exp()
6705or other fast growing functions, and optimization algorithm makes too
6706large steps which leads to overflow. This function allows us to reject
6707steps that are too large (and therefore expose us to the possible
6708overflow) without actually calculating function value at the x+stp*d.
6709
6710NOTE: non-zero StpMax leads to moderate performance degradation because
6711intermediate step of preconditioned L-BFGS optimization is incompatible
6712with limits on step size.
6713
6714 -- ALGLIB --
6715 Copyright 02.04.2010 by Bochkanov Sergey
6716*************************************************************************/
6717void minlmsetstpmax(const minlmstate &state, const double stpmax);
6718
6719
6720/*************************************************************************
6721This function sets scaling coefficients for LM optimizer.
6722
6723ALGLIB optimizers use scaling matrices to test stopping conditions (step
6724size and gradient are scaled before comparison with tolerances). Scale of
6725the I-th variable is a translation invariant measure of:
6726a) "how large" the variable is
6727b) how large the step should be to make significant changes in the function
6728
6729Generally, scale is NOT considered to be a form of preconditioner. But LM
6730optimizer is unique in that it uses scaling matrix both in the stopping
6731condition tests and as Marquardt damping factor.
6732
6733Proper scaling is very important for the algorithm performance. It is less
6734important for the quality of results, but still has some influence (it is
6735easier to converge when variables are properly scaled, so premature
6736stopping is possible when very badly scaled variables are combined with
6737relaxed stopping conditions).
6738
6739INPUT PARAMETERS:
6740 State - structure stores algorithm state
6741 S - array[N], non-zero scaling coefficients
6742 S[i] may be negative, sign doesn't matter.
6743
6744 -- ALGLIB --
6745 Copyright 14.01.2011 by Bochkanov Sergey
6746*************************************************************************/
6747void minlmsetscale(const minlmstate &state, const real_1d_array &s);
6748
6749
6750/*************************************************************************
6751This function sets boundary constraints for LM optimizer
6752
6753Boundary constraints are inactive by default (after initial creation).
6754They are preserved until explicitly turned off with another SetBC() call.
6755
6756INPUT PARAMETERS:
6757 State - structure stores algorithm state
6758 BndL - lower bounds, array[N].
6759 If some (all) variables are unbounded, you may specify
6760 very small number or -INF (latter is recommended because
6761 it will allow solver to use better algorithm).
6762 BndU - upper bounds, array[N].
6763 If some (all) variables are unbounded, you may specify
6764 very large number or +INF (latter is recommended because
6765 it will allow solver to use better algorithm).
6766
6767NOTE 1: it is possible to specify BndL[i]=BndU[i]. In this case I-th
6768variable will be "frozen" at X[i]=BndL[i]=BndU[i].
6769
6770NOTE 2: this solver has following useful properties:
6771* bound constraints are always satisfied exactly
6772* function is evaluated only INSIDE area specified by bound constraints
6773 or at its boundary
6774
6775 -- ALGLIB --
6776 Copyright 14.01.2011 by Bochkanov Sergey
6777*************************************************************************/
6778void minlmsetbc(const minlmstate &state, const real_1d_array &bndl, const real_1d_array &bndu);
6779
6780
6781/*************************************************************************
6782This function sets general linear constraints for LM optimizer
6783
6784Linear constraints are inactive by default (after initial creation). They
6785are preserved until explicitly turned off with another minlmsetlc() call.
6786
6787INPUT PARAMETERS:
6788 State - structure stores algorithm state
6789 C - linear constraints, array[K,N+1].
6790 Each row of C represents one constraint, either equality
6791 or inequality (see below):
6792 * first N elements correspond to coefficients,
6793 * last element corresponds to the right part.
6794 All elements of C (including right part) must be finite.
6795 CT - type of constraints, array[K]:
6796 * if CT[i]>0, then I-th constraint is C[i,*]*x >= C[i,n+1]
6797 * if CT[i]=0, then I-th constraint is C[i,*]*x = C[i,n+1]
6798 * if CT[i]<0, then I-th constraint is C[i,*]*x <= C[i,n+1]
6799 K - number of equality/inequality constraints, K>=0:
6800 * if given, only leading K elements of C/CT are used
6801 * if not given, automatically determined from sizes of C/CT
6802
6803IMPORTANT: if you have linear constraints, it is strongly recommended to
6804 set scale of variables with minlmsetscale(). QP solver which is
6805 used to calculate linearly constrained steps heavily relies on
6806 good scaling of input problems.
6807
6808IMPORTANT: solvers created with minlmcreatefgh() do not support linear
6809 constraints.
6810
6811NOTE: linear (non-bound) constraints are satisfied only approximately -
6812 there always exists some violation due to numerical errors and
6813 algorithmic limitations.
6814
6815NOTE: general linear constraints add significant overhead to solution
6816 process. Although solver performs roughly same amount of iterations
6817 (when compared with similar box-only constrained problem), each
6818 iteration now involves solution of linearly constrained QP
6819 subproblem, which requires ~3-5 times more Cholesky decompositions.
6820      Thus, if you can reformulate your problem in such a way that it has
6821 only box constraints, it may be beneficial to do so.
6822
6823 -- ALGLIB --
6824 Copyright 14.01.2011 by Bochkanov Sergey
6825*************************************************************************/
6826void minlmsetlc(const minlmstate &state, const real_2d_array &c, const integer_1d_array &ct, const ae_int_t k);
6827void minlmsetlc(const minlmstate &state, const real_2d_array &c, const integer_1d_array &ct);
6828
6829
6830/*************************************************************************
6831This function is used to change acceleration settings
6832
6833You can choose between three acceleration strategies:
6834* AccType=0, no acceleration.
6835* AccType=1, secant updates are used to update quadratic model after each
6836 iteration. After fixed number of iterations (or after model breakdown)
6837 we recalculate quadratic model using analytic Jacobian or finite
6838 differences. Number of secant-based iterations depends on optimization
6839 settings: about 3 iterations - when we have analytic Jacobian, up to 2*N
6840 iterations - when we use finite differences to calculate Jacobian.
6841
6842AccType=1 is recommended when Jacobian calculation cost is prohibitively
6843high (several Mx1 function vector calculations followed by several NxN
6844Cholesky factorizations are faster than calculation of one M*N Jacobian).
6845It should also be used when we have no Jacobian, because finite difference
6846approximation takes too much time to compute.
6847
6848Table below list optimization protocols (XYZ protocol corresponds to
6849MinLMCreateXYZ) and acceleration types they support (and use by default).
6850
6851ACCELERATION TYPES SUPPORTED BY OPTIMIZATION PROTOCOLS:
6852
6853protocol 0 1 comment
6854V + +
6855VJ + +
6856FGH +
6857
6858DEFAULT VALUES:
6859
6860protocol 0 1 comment
6861V x without acceleration it is so slooooooooow
6862VJ x
6863FGH x
6864
6865NOTE: this function should be called before optimization. Attempt to call
6866it during algorithm iterations may result in unexpected behavior.
6867
6868NOTE: attempt to call this function with unsupported protocol/acceleration
6869combination will result in exception being thrown.
6870
6871 -- ALGLIB --
6872 Copyright 14.10.2010 by Bochkanov Sergey
6873*************************************************************************/
6874void minlmsetacctype(const minlmstate &state, const ae_int_t acctype);
6875
6876
6877/*************************************************************************
6878This function provides reverse communication interface
6879Reverse communication interface is not documented or recommended to use.
6880See below for functions which provide better documented API
6881*************************************************************************/
6882bool minlmiteration(const minlmstate &state);
6883
6884
6885/*************************************************************************
6886This family of functions is used to launch iterations of nonlinear optimizer
6887
6888These functions accept following parameters:
6889 state - algorithm state
6890 func - callback which calculates function (or merit function)
6891 value func at given point x
6892 grad - callback which calculates function (or merit function)
6893 value func and gradient grad at given point x
6894 hess - callback which calculates function (or merit function)
6895 value func, gradient grad and Hessian hess at given point x
6896 fvec - callback which calculates function vector fi[]
6897 at given point x
6898 jac - callback which calculates function vector fi[]
6899 and Jacobian jac at given point x
6900 rep - optional callback which is called after each iteration
6901 can be NULL
6902 ptr - optional pointer which is passed to func/grad/hess/jac/rep
6903 can be NULL
6904
6905NOTES:
6906
69071. Depending on function used to create state structure, this algorithm
6908 may accept Jacobian and/or Hessian and/or gradient. According to the
6909   said above, there are several versions of this function, which accept
6910 different sets of callbacks.
6911
6912 This flexibility opens way to subtle errors - you may create state with
6913 MinLMCreateFGH() (optimization using Hessian), but call function which
6914 does not accept Hessian. So when algorithm will request Hessian, there
6915 will be no callback to call. In this case exception will be thrown.
6916
6917 Be careful to avoid such errors because there is no way to find them at
6918 compile time - you can see them at runtime only.
6919
6920 -- ALGLIB --
6921 Copyright 10.03.2009 by Bochkanov Sergey
6922
6923*************************************************************************/
6924void minlmoptimize(minlmstate &state,
6925 void (*fvec)(const real_1d_array &x, real_1d_array &fi, void *ptr),
6926 void (*rep)(const real_1d_array &x, double func, void *ptr) = NULL,
6927 void *ptr = NULL);
6928void minlmoptimize(minlmstate &state,
6929 void (*fvec)(const real_1d_array &x, real_1d_array &fi, void *ptr),
6930 void (*jac)(const real_1d_array &x, real_1d_array &fi, real_2d_array &jac, void *ptr),
6931 void (*rep)(const real_1d_array &x, double func, void *ptr) = NULL,
6932 void *ptr = NULL);
6933void minlmoptimize(minlmstate &state,
6934 void (*func)(const real_1d_array &x, double &func, void *ptr),
6935 void (*grad)(const real_1d_array &x, double &func, real_1d_array &grad, void *ptr),
6936 void (*hess)(const real_1d_array &x, double &func, real_1d_array &grad, real_2d_array &hess, void *ptr),
6937 void (*rep)(const real_1d_array &x, double func, void *ptr) = NULL,
6938 void *ptr = NULL);
6939void minlmoptimize(minlmstate &state,
6940 void (*func)(const real_1d_array &x, double &func, void *ptr),
6941 void (*jac)(const real_1d_array &x, real_1d_array &fi, real_2d_array &jac, void *ptr),
6942 void (*rep)(const real_1d_array &x, double func, void *ptr) = NULL,
6943 void *ptr = NULL);
6944void minlmoptimize(minlmstate &state,
6945 void (*func)(const real_1d_array &x, double &func, void *ptr),
6946 void (*grad)(const real_1d_array &x, double &func, real_1d_array &grad, void *ptr),
6947 void (*jac)(const real_1d_array &x, real_1d_array &fi, real_2d_array &jac, void *ptr),
6948 void (*rep)(const real_1d_array &x, double func, void *ptr) = NULL,
6949 void *ptr = NULL);
6950
6951
6952/*************************************************************************
6953Levenberg-Marquardt algorithm results
6954
6955INPUT PARAMETERS:
6956 State - algorithm state
6957
6958OUTPUT PARAMETERS:
6959 X - array[0..N-1], solution
6960 Rep - optimization report; includes termination codes and
6961 additional information. Termination codes are listed below,
6962 see comments for this structure for more info.
6963 Termination code is stored in rep.terminationtype field:
6964 * -8 optimizer detected NAN/INF values either in the
6965 function itself, or in its Jacobian
6966 * -7 derivative correctness check failed;
6967 see rep.funcidx, rep.varidx for
6968 more information.
6969 * -3 constraints are inconsistent
6970 * 2 relative step is no more than EpsX.
6971                * 5    MaxIts steps were taken
6972 * 7 stopping conditions are too stringent,
6973 further improvement is impossible
6974 * 8 terminated by user who called minlmrequesttermination().
6975 X contains point which was "current accepted" when
6976 termination request was submitted.
6977
6978 -- ALGLIB --
6979 Copyright 10.03.2009 by Bochkanov Sergey
6980*************************************************************************/
6981void minlmresults(const minlmstate &state, real_1d_array &x, minlmreport &rep);
6982
6983
6984/*************************************************************************
6985Levenberg-Marquardt algorithm results
6986
6987Buffered implementation of MinLMResults(), which uses pre-allocated buffer
6988to store X[]. If buffer size is too small, it resizes buffer. It is
6989intended to be used in the inner cycles of performance critical algorithms
6990where array reallocation penalty is too large to be ignored.
6991
6992 -- ALGLIB --
6993 Copyright 10.03.2009 by Bochkanov Sergey
6994*************************************************************************/
6995void minlmresultsbuf(const minlmstate &state, real_1d_array &x, minlmreport &rep);
6996
6997
6998/*************************************************************************
6999This subroutine restarts LM algorithm from new point. All optimization
7000parameters are left unchanged.
7001
7002This function allows to solve multiple optimization problems (which
7003must have same number of dimensions) without object reallocation penalty.
7004
7005INPUT PARAMETERS:
7006 State - structure used for reverse communication previously
7007 allocated with MinLMCreateXXX call.
7008 X - new starting point.
7009
7010 -- ALGLIB --
7011 Copyright 30.07.2010 by Bochkanov Sergey
7012*************************************************************************/
7013void minlmrestartfrom(const minlmstate &state, const real_1d_array &x);
7014
7015
7016/*************************************************************************
7017This subroutine submits request for termination of running optimizer. It
7018should be called from user-supplied callback when user decides that it is
7019time to "smoothly" terminate optimization process. As result, optimizer
7020stops at point which was "current accepted" when termination request was
7021submitted and returns error code 8 (successful termination).
7022
7023INPUT PARAMETERS:
7024 State - optimizer structure
7025
7026NOTE: after request for termination optimizer may perform several
7027 additional calls to user-supplied callbacks. It does NOT guarantee
7028 to stop immediately - it just guarantees that these additional calls
7029 will be discarded later.
7030
7031NOTE: calling this function on optimizer which is NOT running will have no
7032 effect.
7033
7034NOTE: multiple calls to this function are possible. First call is counted,
7035 subsequent calls are silently ignored.
7036
7037 -- ALGLIB --
7038 Copyright 08.10.2014 by Bochkanov Sergey
7039*************************************************************************/
7040void minlmrequesttermination(const minlmstate &state);
7041
7042
7043/*************************************************************************
7044This is obsolete function.
7045
7046Since ALGLIB 3.3 it is equivalent to MinLMCreateVJ().
7047
7048 -- ALGLIB --
7049 Copyright 30.03.2009 by Bochkanov Sergey
7050*************************************************************************/
7051void minlmcreatevgj(const ae_int_t n, const ae_int_t m, const real_1d_array &x, minlmstate &state);
7052void minlmcreatevgj(const ae_int_t m, const real_1d_array &x, minlmstate &state);
7053
7054
7055/*************************************************************************
7056This is obsolete function.
7057
7058Since ALGLIB 3.3 it is equivalent to MinLMCreateFJ().
7059
7060 -- ALGLIB --
7061 Copyright 30.03.2009 by Bochkanov Sergey
7062*************************************************************************/
7063void minlmcreatefgj(const ae_int_t n, const ae_int_t m, const real_1d_array &x, minlmstate &state);
7064void minlmcreatefgj(const ae_int_t m, const real_1d_array &x, minlmstate &state);
7065
7066
7067/*************************************************************************
7068This function is considered obsolete since ALGLIB 3.1.0 and is present for
7069backward compatibility only. We recommend to use MinLMCreateVJ, which
7070provides similar, but more consistent and feature-rich interface.
7071
7072 -- ALGLIB --
7073 Copyright 30.03.2009 by Bochkanov Sergey
7074*************************************************************************/
7075void minlmcreatefj(const ae_int_t n, const ae_int_t m, const real_1d_array &x, minlmstate &state);
7076void minlmcreatefj(const ae_int_t m, const real_1d_array &x, minlmstate &state);
7077
7078
7079/*************************************************************************
7080This subroutine turns on verification of the user-supplied analytic
7081gradient:
7082* user calls this subroutine before optimization begins
7083* MinLMOptimize() is called
7084* prior to actual optimization, for each function Fi and each component
7085 of parameters being optimized X[j] algorithm performs following steps:
7086 * two trial steps are made to X[j]-TestStep*S[j] and X[j]+TestStep*S[j],
7087 where X[j] is j-th parameter and S[j] is a scale of j-th parameter
7088 * if needed, steps are bounded with respect to constraints on X[]
7089 * Fi(X) is evaluated at these trial points
7090 * we perform one more evaluation in the middle point of the interval
7091 * we build cubic model using function values and derivatives at trial
7092 points and we compare its prediction with actual value in the middle
7093 point
7094 * in case difference between prediction and actual value is higher than
7095 some predetermined threshold, algorithm stops with completion code -7;
7096 Rep.VarIdx is set to index of the parameter with incorrect derivative,
7097 Rep.FuncIdx is set to index of the function.
7098* after verification is over, algorithm proceeds to the actual optimization.
7099
7100NOTE 1: verification needs N (parameters count) Jacobian evaluations. It
7101 is very costly and you should use it only for low dimensional
7102 problems, when you want to be sure that you've correctly
7103 calculated analytic derivatives. You should not use it in the
7104 production code (unless you want to check derivatives provided
7105 by some third party).
7106
7107NOTE 2: you should carefully choose TestStep. Value which is too large
7108 (so large that function behaviour is significantly non-cubic) will
7109 lead to false alarms. You may use different step for different
7110 parameters by means of setting scale with MinLMSetScale().
7111
7112NOTE 3: this function may lead to false positives. In case it reports that
7113 I-th derivative was calculated incorrectly, you may decrease test
7114 step and try one more time - maybe your function changes too
7115        sharply and your step is too large for such rapidly changing
7116 function.
7117
7118INPUT PARAMETERS:
7119 State - structure used to store algorithm state
7120 TestStep - verification step:
7121 * TestStep=0 turns verification off
7122 * TestStep>0 activates verification
7123
7124 -- ALGLIB --
7125 Copyright 15.06.2012 by Bochkanov Sergey
7126*************************************************************************/
7127void minlmsetgradientcheck(const minlmstate &state, const double teststep);
7128}
7129
7131//
7132// THIS SECTION CONTAINS COMPUTATIONAL CORE DECLARATIONS (FUNCTIONS)
7133//
7135namespace alglib_impl
7136{
7137void cqminit(ae_int_t n, convexquadraticmodel* s, ae_state *_state);
7138void cqmseta(convexquadraticmodel* s,
7139 /* Real */ ae_matrix* a,
7140 ae_bool isupper,
7141 double alpha,
7142 ae_state *_state);
7143void cqmgeta(convexquadraticmodel* s,
7144 /* Real */ ae_matrix* a,
7145 ae_state *_state);
7146void cqmrewritedensediagonal(convexquadraticmodel* s,
7147 /* Real */ ae_vector* z,
7148 ae_state *_state);
7149void cqmsetd(convexquadraticmodel* s,
7150 /* Real */ ae_vector* d,
7151 double tau,
7152 ae_state *_state);
7153void cqmdropa(convexquadraticmodel* s, ae_state *_state);
7154void cqmsetb(convexquadraticmodel* s,
7155 /* Real */ ae_vector* b,
7156 ae_state *_state);
7157void cqmsetq(convexquadraticmodel* s,
7158 /* Real */ ae_matrix* q,
7159 /* Real */ ae_vector* r,
7160 ae_int_t k,
7161 double theta,
7162 ae_state *_state);
7163void cqmsetactiveset(convexquadraticmodel* s,
7164 /* Real */ ae_vector* x,
7165 /* Boolean */ ae_vector* activeset,
7166 ae_state *_state);
7167double cqmeval(convexquadraticmodel* s,
7168 /* Real */ ae_vector* x,
7169 ae_state *_state);
7170void cqmevalx(convexquadraticmodel* s,
7171 /* Real */ ae_vector* x,
7172 double* r,
7173 double* noise,
7174 ae_state *_state);
7175void cqmgradunconstrained(convexquadraticmodel* s,
7176 /* Real */ ae_vector* x,
7177 /* Real */ ae_vector* g,
7178 ae_state *_state);
7179double cqmxtadx2(convexquadraticmodel* s,
7180 /* Real */ ae_vector* x,
7181 ae_state *_state);
7182void cqmadx(convexquadraticmodel* s,
7183 /* Real */ ae_vector* x,
7184 /* Real */ ae_vector* y,
7185 ae_state *_state);
7186ae_bool cqmconstrainedoptimum(convexquadraticmodel* s,
7187 /* Real */ ae_vector* x,
7188 ae_state *_state);
7189void cqmscalevector(convexquadraticmodel* s,
7190 /* Real */ ae_vector* x,
7191 ae_state *_state);
7192double cqmdebugconstrainedevalt(convexquadraticmodel* s,
7193 /* Real */ ae_vector* x,
7194 ae_state *_state);
7195double cqmdebugconstrainedevale(convexquadraticmodel* s,
7196 /* Real */ ae_vector* x,
7197 ae_state *_state);
7198void _convexquadraticmodel_init(void* _p, ae_state *_state);
7199void _convexquadraticmodel_init_copy(void* _dst, void* _src, ae_state *_state);
7200void _convexquadraticmodel_clear(void* _p);
7201void _convexquadraticmodel_destroy(void* _p);
7202void trimprepare(double f, double* threshold, ae_state *_state);
7203void trimfunction(double* f,
7204 /* Real */ ae_vector* g,
7205 ae_int_t n,
7206 double threshold,
7207 ae_state *_state);
7208ae_bool enforceboundaryconstraints(/* Real */ ae_vector* x,
7209 /* Real */ ae_vector* bl,
7210 /* Boolean */ ae_vector* havebl,
7211 /* Real */ ae_vector* bu,
7212 /* Boolean */ ae_vector* havebu,
7213 ae_int_t nmain,
7214 ae_int_t nslack,
7215 ae_state *_state);
7216void projectgradientintobc(/* Real */ ae_vector* x,
7217 /* Real */ ae_vector* g,
7218 /* Real */ ae_vector* bl,
7219 /* Boolean */ ae_vector* havebl,
7220 /* Real */ ae_vector* bu,
7221 /* Boolean */ ae_vector* havebu,
7222 ae_int_t nmain,
7223 ae_int_t nslack,
7224 ae_state *_state);
7225void calculatestepbound(/* Real */ ae_vector* x,
7226 /* Real */ ae_vector* d,
7227 double alpha,
7228 /* Real */ ae_vector* bndl,
7229 /* Boolean */ ae_vector* havebndl,
7230 /* Real */ ae_vector* bndu,
7231 /* Boolean */ ae_vector* havebndu,
7232 ae_int_t nmain,
7233 ae_int_t nslack,
7234 ae_int_t* variabletofreeze,
7235 double* valuetofreeze,
7236 double* maxsteplen,
7237 ae_state *_state);
7238ae_int_t postprocessboundedstep(/* Real */ ae_vector* x,
7239 /* Real */ ae_vector* xprev,
7240 /* Real */ ae_vector* bndl,
7241 /* Boolean */ ae_vector* havebndl,
7242 /* Real */ ae_vector* bndu,
7243 /* Boolean */ ae_vector* havebndu,
7244 ae_int_t nmain,
7245 ae_int_t nslack,
7246 ae_int_t variabletofreeze,
7247 double valuetofreeze,
7248 double steptaken,
7249 double maxsteplen,
7250 ae_state *_state);
7251void filterdirection(/* Real */ ae_vector* d,
7252 /* Real */ ae_vector* x,
7253 /* Real */ ae_vector* bndl,
7254 /* Boolean */ ae_vector* havebndl,
7255 /* Real */ ae_vector* bndu,
7256 /* Boolean */ ae_vector* havebndu,
7257 /* Real */ ae_vector* s,
7258 ae_int_t nmain,
7259 ae_int_t nslack,
7260 double droptol,
7261 ae_state *_state);
7262ae_int_t numberofchangedconstraints(/* Real */ ae_vector* x,
7263 /* Real */ ae_vector* xprev,
7264 /* Real */ ae_vector* bndl,
7265 /* Boolean */ ae_vector* havebndl,
7266 /* Real */ ae_vector* bndu,
7267 /* Boolean */ ae_vector* havebndu,
7268 ae_int_t nmain,
7269 ae_int_t nslack,
7270 ae_state *_state);
7271ae_bool findfeasiblepoint(/* Real */ ae_vector* x,
7272 /* Real */ ae_vector* bndl,
7273 /* Boolean */ ae_vector* havebndl,
7274 /* Real */ ae_vector* bndu,
7275 /* Boolean */ ae_vector* havebndu,
7276 ae_int_t nmain,
7277 ae_int_t nslack,
7278 /* Real */ ae_matrix* ce,
7279 ae_int_t k,
7280 double epsi,
7281 ae_int_t* qpits,
7282 ae_int_t* gpaits,
7283 ae_state *_state);
7284ae_bool derivativecheck(double f0,
7285 double df0,
7286 double f1,
7287 double df1,
7288 double f,
7289 double df,
7290 double width,
7291 ae_state *_state);
7292void estimateparabolicmodel(double absasum,
7293 double absasum2,
7294 double mx,
7295 double mb,
7296 double md,
7297 double d1,
7298 double d2,
7299 ae_int_t* d1est,
7300 ae_int_t* d2est,
7301 ae_state *_state);
7302void inexactlbfgspreconditioner(/* Real */ ae_vector* s,
7303 ae_int_t n,
7304 /* Real */ ae_vector* d,
7305 /* Real */ ae_vector* c,
7306 /* Real */ ae_matrix* w,
7307 ae_int_t k,
7308 precbuflbfgs* buf,
7309 ae_state *_state);
7310void preparelowrankpreconditioner(/* Real */ ae_vector* d,
7311 /* Real */ ae_vector* c,
7312 /* Real */ ae_matrix* w,
7313 ae_int_t n,
7314 ae_int_t k,
7315 precbuflowrank* buf,
7316 ae_state *_state);
7317void applylowrankpreconditioner(/* Real */ ae_vector* s,
7318 precbuflowrank* buf,
7319 ae_state *_state);
7320void _precbuflbfgs_init(void* _p, ae_state *_state);
7321void _precbuflbfgs_init_copy(void* _dst, void* _src, ae_state *_state);
7322void _precbuflbfgs_clear(void* _p);
7323void _precbuflbfgs_destroy(void* _p);
7324void _precbuflowrank_init(void* _p, ae_state *_state);
7325void _precbuflowrank_init_copy(void* _dst, void* _src, ae_state *_state);
7326void _precbuflowrank_clear(void* _p);
7327void _precbuflowrank_destroy(void* _p);
7328void snnlsinit(ae_int_t nsmax,
7329 ae_int_t ndmax,
7330 ae_int_t nrmax,
7331 snnlssolver* s,
7332 ae_state *_state);
7333void snnlssetproblem(snnlssolver* s,
7334 /* Real */ ae_matrix* a,
7335 /* Real */ ae_vector* b,
7336 ae_int_t ns,
7337 ae_int_t nd,
7338 ae_int_t nr,
7339 ae_state *_state);
7340void snnlsdropnnc(snnlssolver* s, ae_int_t idx, ae_state *_state);
7341void snnlssolve(snnlssolver* s,
7342 /* Real */ ae_vector* x,
7343 ae_state *_state);
7344void _snnlssolver_init(void* _p, ae_state *_state);
7345void _snnlssolver_init_copy(void* _dst, void* _src, ae_state *_state);
7346void _snnlssolver_clear(void* _p);
7347void _snnlssolver_destroy(void* _p);
7348void sasinit(ae_int_t n, sactiveset* s, ae_state *_state);
7349void sassetscale(sactiveset* state,
7350 /* Real */ ae_vector* s,
7351 ae_state *_state);
7352void sassetprecdiag(sactiveset* state,
7353 /* Real */ ae_vector* d,
7354 ae_state *_state);
7355void sassetbc(sactiveset* state,
7356 /* Real */ ae_vector* bndl,
7357 /* Real */ ae_vector* bndu,
7358 ae_state *_state);
7359void sassetlc(sactiveset* state,
7360 /* Real */ ae_matrix* c,
7361 /* Integer */ ae_vector* ct,
7362 ae_int_t k,
7363 ae_state *_state);
7364void sassetlcx(sactiveset* state,
7365 /* Real */ ae_matrix* cleic,
7366 ae_int_t nec,
7367 ae_int_t nic,
7368 ae_state *_state);
7369ae_bool sasstartoptimization(sactiveset* state,
7370 /* Real */ ae_vector* x,
7371 ae_state *_state);
7372void sasexploredirection(sactiveset* state,
7373 /* Real */ ae_vector* d,
7374 double* stpmax,
7375 ae_int_t* cidx,
7376 double* vval,
7377 ae_state *_state);
7378ae_int_t sasmoveto(sactiveset* state,
7379 /* Real */ ae_vector* xn,
7380 ae_bool needact,
7381 ae_int_t cidx,
7382 double cval,
7383 ae_state *_state);
7384void sasimmediateactivation(sactiveset* state,
7385 ae_int_t cidx,
7386 double cval,
7387 ae_state *_state);
7388void sasconstraineddescent(sactiveset* state,
7389 /* Real */ ae_vector* g,
7390 /* Real */ ae_vector* d,
7391 ae_state *_state);
7392void sasconstraineddescentprec(sactiveset* state,
7393 /* Real */ ae_vector* g,
7394 /* Real */ ae_vector* d,
7395 ae_state *_state);
7396void sasconstraineddirection(sactiveset* state,
7397 /* Real */ ae_vector* d,
7398 ae_state *_state);
7399void sasconstraineddirectionprec(sactiveset* state,
7400 /* Real */ ae_vector* d,
7401 ae_state *_state);
7402void sascorrection(sactiveset* state,
7403 /* Real */ ae_vector* x,
7404 double* penalty,
7405 ae_state *_state);
7406double sasactivelcpenalty1(sactiveset* state,
7407 /* Real */ ae_vector* x,
7408 ae_state *_state);
7409double sasscaledconstrainednorm(sactiveset* state,
7410 /* Real */ ae_vector* d,
7411 ae_state *_state);
7412void sasstopoptimization(sactiveset* state, ae_state *_state);
7413void sasreactivateconstraints(sactiveset* state,
7414 /* Real */ ae_vector* gc,
7415 ae_state *_state);
7416void sasreactivateconstraintsprec(sactiveset* state,
7417 /* Real */ ae_vector* gc,
7418 ae_state *_state);
7419void sasrebuildbasis(sactiveset* state, ae_state *_state);
7420void _sactiveset_init(void* _p, ae_state *_state);
7421void _sactiveset_init_copy(void* _dst, void* _src, ae_state *_state);
7422void _sactiveset_clear(void* _p);
7423void _sactiveset_destroy(void* _p);
7424void qqploaddefaults(ae_int_t nmain, qqpsettings* s, ae_state *_state);
7425void qqpcopysettings(qqpsettings* src, qqpsettings* dst, ae_state *_state);
7426void qqpoptimize(convexquadraticmodel* cqmac,
7427 sparsematrix* sparseac,
7428 /* Real */ ae_matrix* denseac,
7429 ae_int_t akind,
7430 ae_bool isupper,
7431 /* Real */ ae_vector* bc,
7432 /* Real */ ae_vector* bndlc,
7433 /* Real */ ae_vector* bnduc,
7434 /* Real */ ae_vector* sc,
7435 /* Real */ ae_vector* xoriginc,
7436 ae_int_t nc,
7437 /* Real */ ae_matrix* cleicc,
7438 ae_int_t nec,
7439 ae_int_t nic,
7440 qqpsettings* settings,
7441 qqpbuffers* sstate,
7442 /* Real */ ae_vector* xs,
7443 ae_int_t* terminationtype,
7444 ae_state *_state);
7445void _qqpsettings_init(void* _p, ae_state *_state);
7446void _qqpsettings_init_copy(void* _dst, void* _src, ae_state *_state);
7447void _qqpsettings_clear(void* _p);
7448void _qqpsettings_destroy(void* _p);
7449void _qqpbuffers_init(void* _p, ae_state *_state);
7450void _qqpbuffers_init_copy(void* _dst, void* _src, ae_state *_state);
7451void _qqpbuffers_clear(void* _p);
7452void _qqpbuffers_destroy(void* _p);
7453void minlbfgscreate(ae_int_t n,
7454 ae_int_t m,
7455 /* Real */ ae_vector* x,
7456 minlbfgsstate* state,
7457 ae_state *_state);
7458void minlbfgscreatef(ae_int_t n,
7459 ae_int_t m,
7460 /* Real */ ae_vector* x,
7461 double diffstep,
7462 minlbfgsstate* state,
7463 ae_state *_state);
7464void minlbfgssetcond(minlbfgsstate* state,
7465 double epsg,
7466 double epsf,
7467 double epsx,
7468 ae_int_t maxits,
7469 ae_state *_state);
7470void minlbfgssetxrep(minlbfgsstate* state,
7471 ae_bool needxrep,
7472 ae_state *_state);
7473void minlbfgssetstpmax(minlbfgsstate* state,
7474 double stpmax,
7475 ae_state *_state);
7476void minlbfgssetscale(minlbfgsstate* state,
7477 /* Real */ ae_vector* s,
7478 ae_state *_state);
7479void minlbfgscreatex(ae_int_t n,
7480 ae_int_t m,
7481 /* Real */ ae_vector* x,
7482 ae_int_t flags,
7483 double diffstep,
7484 minlbfgsstate* state,
7485 ae_state *_state);
7486void minlbfgssetprecdefault(minlbfgsstate* state, ae_state *_state);
7487void minlbfgssetpreccholesky(minlbfgsstate* state,
7488 /* Real */ ae_matrix* p,
7489 ae_bool isupper,
7490 ae_state *_state);
7491void minlbfgssetprecdiag(minlbfgsstate* state,
7492 /* Real */ ae_vector* d,
7493 ae_state *_state);
7494void minlbfgssetprecscale(minlbfgsstate* state, ae_state *_state);
7495void minlbfgssetprecrankklbfgsfast(minlbfgsstate* state,
7496 /* Real */ ae_vector* d,
7497 /* Real */ ae_vector* c,
7498 /* Real */ ae_matrix* w,
7499 ae_int_t cnt,
7500 ae_state *_state);
7501void minlbfgssetpreclowrankexact(minlbfgsstate* state,
7502 /* Real */ ae_vector* d,
7503 /* Real */ ae_vector* c,
7504 /* Real */ ae_matrix* w,
7505 ae_int_t cnt,
7506 ae_state *_state);
7507ae_bool minlbfgsiteration(minlbfgsstate* state, ae_state *_state);
7508void minlbfgsresults(minlbfgsstate* state,
7509 /* Real */ ae_vector* x,
7510 minlbfgsreport* rep,
7511 ae_state *_state);
7512void minlbfgsresultsbuf(minlbfgsstate* state,
7513 /* Real */ ae_vector* x,
7514 minlbfgsreport* rep,
7515 ae_state *_state);
7516void minlbfgsrestartfrom(minlbfgsstate* state,
7517 /* Real */ ae_vector* x,
7518 ae_state *_state);
7519void minlbfgsrequesttermination(minlbfgsstate* state, ae_state *_state);
7520void minlbfgssetgradientcheck(minlbfgsstate* state,
7521 double teststep,
7522 ae_state *_state);
7523void _minlbfgsstate_init(void* _p, ae_state *_state);
7524void _minlbfgsstate_init_copy(void* _dst, void* _src, ae_state *_state);
7525void _minlbfgsstate_clear(void* _p);
7526void _minlbfgsstate_destroy(void* _p);
7527void _minlbfgsreport_init(void* _p, ae_state *_state);
7528void _minlbfgsreport_init_copy(void* _dst, void* _src, ae_state *_state);
7529void _minlbfgsreport_clear(void* _p);
7530void _minlbfgsreport_destroy(void* _p);
7531void qpdenseaulloaddefaults(ae_int_t nmain,
7532 qpdenseaulsettings* s,
7533 ae_state *_state);
7534void qpdenseauloptimize(convexquadraticmodel* a,
7535 sparsematrix* sparsea,
7536 ae_int_t akind,
7537 ae_bool sparseaupper,
7538 /* Real */ ae_vector* b,
7539 /* Real */ ae_vector* bndl,
7540 /* Real */ ae_vector* bndu,
7541 /* Real */ ae_vector* s,
7542 /* Real */ ae_vector* xorigin,
7543 ae_int_t nn,
7544 /* Real */ ae_matrix* cleic,
7545 ae_int_t dnec,
7546 ae_int_t dnic,
7547 sparsematrix* scleic,
7548 ae_int_t snec,
7549 ae_int_t snic,
7550 ae_bool renormlc,
7551 qpdenseaulsettings* settings,
7552 qpdenseaulbuffers* state,
7553 /* Real */ ae_vector* xs,
7554 ae_int_t* terminationtype,
7555 ae_state *_state);
7556void _qpdenseaulsettings_init(void* _p, ae_state *_state);
7557void _qpdenseaulsettings_init_copy(void* _dst, void* _src, ae_state *_state);
7558void _qpdenseaulsettings_clear(void* _p);
7559void _qpdenseaulsettings_destroy(void* _p);
7560void _qpdenseaulbuffers_init(void* _p, ae_state *_state);
7561void _qpdenseaulbuffers_init_copy(void* _dst, void* _src, ae_state *_state);
7562void _qpdenseaulbuffers_clear(void* _p);
7563void _qpdenseaulbuffers_destroy(void* _p);
7564void qpcholeskyloaddefaults(ae_int_t nmain,
7565 qpcholeskysettings* s,
7566 ae_state *_state);
7567void qpcholeskycopysettings(qpcholeskysettings* src,
7568 qpcholeskysettings* dst,
7569 ae_state *_state);
7570void qpcholeskyoptimize(convexquadraticmodel* a,
7571 double anorm,
7572 /* Real */ ae_vector* b,
7573 /* Real */ ae_vector* bndl,
7574 /* Real */ ae_vector* bndu,
7575 /* Real */ ae_vector* s,
7576 /* Real */ ae_vector* xorigin,
7577 ae_int_t n,
7578 /* Real */ ae_matrix* cleic,
7579 ae_int_t nec,
7580 ae_int_t nic,
7581 qpcholeskybuffers* sstate,
7582 /* Real */ ae_vector* xsc,
7583 ae_int_t* terminationtype,
7584 ae_state *_state);
7585void _qpcholeskysettings_init(void* _p, ae_state *_state);
7586void _qpcholeskysettings_init_copy(void* _dst, void* _src, ae_state *_state);
7587void _qpcholeskysettings_clear(void* _p);
7588void _qpcholeskysettings_destroy(void* _p);
7589void _qpcholeskybuffers_init(void* _p, ae_state *_state);
7590void _qpcholeskybuffers_init_copy(void* _dst, void* _src, ae_state *_state);
7591void _qpcholeskybuffers_clear(void* _p);
7592void _qpcholeskybuffers_destroy(void* _p);
7593void mincgcreate(ae_int_t n,
7594 /* Real */ ae_vector* x,
7595 mincgstate* state,
7596 ae_state *_state);
7597void mincgcreatef(ae_int_t n,
7598 /* Real */ ae_vector* x,
7599 double diffstep,
7600 mincgstate* state,
7601 ae_state *_state);
7602void mincgsetcond(mincgstate* state,
7603 double epsg,
7604 double epsf,
7605 double epsx,
7606 ae_int_t maxits,
7607 ae_state *_state);
7608void mincgsetscale(mincgstate* state,
7609 /* Real */ ae_vector* s,
7610 ae_state *_state);
7611void mincgsetxrep(mincgstate* state, ae_bool needxrep, ae_state *_state);
7612void mincgsetdrep(mincgstate* state, ae_bool needdrep, ae_state *_state);
7613void mincgsetcgtype(mincgstate* state, ae_int_t cgtype, ae_state *_state);
7614void mincgsetstpmax(mincgstate* state, double stpmax, ae_state *_state);
7615void mincgsuggeststep(mincgstate* state, double stp, ae_state *_state);
7616double mincglastgoodstep(mincgstate* state, ae_state *_state);
7617void mincgsetprecdefault(mincgstate* state, ae_state *_state);
7618void mincgsetprecdiag(mincgstate* state,
7619 /* Real */ ae_vector* d,
7620 ae_state *_state);
7621void mincgsetprecscale(mincgstate* state, ae_state *_state);
7622ae_bool mincgiteration(mincgstate* state, ae_state *_state);
7623void mincgresults(mincgstate* state,
7624 /* Real */ ae_vector* x,
7625 mincgreport* rep,
7626 ae_state *_state);
7627void mincgresultsbuf(mincgstate* state,
7628 /* Real */ ae_vector* x,
7629 mincgreport* rep,
7630 ae_state *_state);
7631void mincgrestartfrom(mincgstate* state,
7632 /* Real */ ae_vector* x,
7633 ae_state *_state);
7634void mincgrequesttermination(mincgstate* state, ae_state *_state);
7635void mincgsetprecdiagfast(mincgstate* state,
7636 /* Real */ ae_vector* d,
7637 ae_state *_state);
7638void mincgsetpreclowrankfast(mincgstate* state,
7639 /* Real */ ae_vector* d1,
7640 /* Real */ ae_vector* c,
7641 /* Real */ ae_matrix* v,
7642 ae_int_t vcnt,
7643 ae_state *_state);
7644void mincgsetprecvarpart(mincgstate* state,
7645 /* Real */ ae_vector* d2,
7646 ae_state *_state);
7647void mincgsetgradientcheck(mincgstate* state,
7648 double teststep,
7649 ae_state *_state);
7650void _mincgstate_init(void* _p, ae_state *_state);
7651void _mincgstate_init_copy(void* _dst, void* _src, ae_state *_state);
7652void _mincgstate_clear(void* _p);
7653void _mincgstate_destroy(void* _p);
7654void _mincgreport_init(void* _p, ae_state *_state);
7655void _mincgreport_init_copy(void* _dst, void* _src, ae_state *_state);
7656void _mincgreport_clear(void* _p);
7657void _mincgreport_destroy(void* _p);
7658void minbleiccreate(ae_int_t n,
7659 /* Real */ ae_vector* x,
7660 minbleicstate* state,
7661 ae_state *_state);
7662void minbleiccreatef(ae_int_t n,
7663 /* Real */ ae_vector* x,
7664 double diffstep,
7665 minbleicstate* state,
7666 ae_state *_state);
7667void minbleicsetbc(minbleicstate* state,
7668 /* Real */ ae_vector* bndl,
7669 /* Real */ ae_vector* bndu,
7670 ae_state *_state);
7671void minbleicsetlc(minbleicstate* state,
7672 /* Real */ ae_matrix* c,
7673 /* Integer */ ae_vector* ct,
7674 ae_int_t k,
7675 ae_state *_state);
7676void minbleicsetcond(minbleicstate* state,
7677 double epsg,
7678 double epsf,
7679 double epsx,
7680 ae_int_t maxits,
7681 ae_state *_state);
7682void minbleicsetscale(minbleicstate* state,
7683 /* Real */ ae_vector* s,
7684 ae_state *_state);
7685void minbleicsetprecdefault(minbleicstate* state, ae_state *_state);
7686void minbleicsetprecdiag(minbleicstate* state,
7687 /* Real */ ae_vector* d,
7688 ae_state *_state);
7689void minbleicsetprecscale(minbleicstate* state, ae_state *_state);
7690void minbleicsetxrep(minbleicstate* state,
7691 ae_bool needxrep,
7692 ae_state *_state);
7693void minbleicsetdrep(minbleicstate* state,
7694 ae_bool needdrep,
7695 ae_state *_state);
7696void minbleicsetstpmax(minbleicstate* state,
7697 double stpmax,
7698 ae_state *_state);
7699ae_bool minbleiciteration(minbleicstate* state, ae_state *_state);
7700void minbleicresults(minbleicstate* state,
7701 /* Real */ ae_vector* x,
7702 minbleicreport* rep,
7703 ae_state *_state);
7704void minbleicresultsbuf(minbleicstate* state,
7705 /* Real */ ae_vector* x,
7706 minbleicreport* rep,
7707 ae_state *_state);
7708void minbleicrestartfrom(minbleicstate* state,
7709 /* Real */ ae_vector* x,
7710 ae_state *_state);
7711void minbleicrequesttermination(minbleicstate* state, ae_state *_state);
7712void minbleicemergencytermination(minbleicstate* state, ae_state *_state);
7713void minbleicsetgradientcheck(minbleicstate* state,
7714 double teststep,
7715 ae_state *_state);
7716void _minbleicstate_init(void* _p, ae_state *_state);
7717void _minbleicstate_init_copy(void* _dst, void* _src, ae_state *_state);
7718void _minbleicstate_clear(void* _p);
7719void _minbleicstate_destroy(void* _p);
7720void _minbleicreport_init(void* _p, ae_state *_state);
7721void _minbleicreport_init_copy(void* _dst, void* _src, ae_state *_state);
7722void _minbleicreport_clear(void* _p);
7723void _minbleicreport_destroy(void* _p);
7724void qpbleicloaddefaults(ae_int_t nmain,
7725 qpbleicsettings* s,
7726 ae_state *_state);
7727void qpbleiccopysettings(qpbleicsettings* src,
7728 qpbleicsettings* dst,
7729 ae_state *_state);
7730void qpbleicoptimize(convexquadraticmodel* a,
7731 sparsematrix* sparsea,
7732 ae_int_t akind,
7733 ae_bool sparseaupper,
7734 double absasum,
7735 double absasum2,
7736 /* Real */ ae_vector* b,
7737 /* Real */ ae_vector* bndl,
7738 /* Real */ ae_vector* bndu,
7739 /* Real */ ae_vector* s,
7740 /* Real */ ae_vector* xorigin,
7741 ae_int_t n,
7742 /* Real */ ae_matrix* cleic,
7743 ae_int_t nec,
7744 ae_int_t nic,
7745 qpbleicsettings* settings,
7746 qpbleicbuffers* sstate,
7747 ae_bool* firstcall,
7748 /* Real */ ae_vector* xs,
7749 ae_int_t* terminationtype,
7750 ae_state *_state);
7751void _qpbleicsettings_init(void* _p, ae_state *_state);
7752void _qpbleicsettings_init_copy(void* _dst, void* _src, ae_state *_state);
7753void _qpbleicsettings_clear(void* _p);
7754void _qpbleicsettings_destroy(void* _p);
7755void _qpbleicbuffers_init(void* _p, ae_state *_state);
7756void _qpbleicbuffers_init_copy(void* _dst, void* _src, ae_state *_state);
7757void _qpbleicbuffers_clear(void* _p);
7758void _qpbleicbuffers_destroy(void* _p);
7759void minqpcreate(ae_int_t n, minqpstate* state, ae_state *_state);
7760void minqpsetlinearterm(minqpstate* state,
7761 /* Real */ ae_vector* b,
7762 ae_state *_state);
7763void minqpsetquadraticterm(minqpstate* state,
7764 /* Real */ ae_matrix* a,
7765 ae_bool isupper,
7766 ae_state *_state);
7767void minqpsetquadratictermsparse(minqpstate* state,
7768 sparsematrix* a,
7769 ae_bool isupper,
7770 ae_state *_state);
7771void minqpsetstartingpoint(minqpstate* state,
7772 /* Real */ ae_vector* x,
7773 ae_state *_state);
7774void minqpsetorigin(minqpstate* state,
7775 /* Real */ ae_vector* xorigin,
7776 ae_state *_state);
7777void minqpsetscale(minqpstate* state,
7778 /* Real */ ae_vector* s,
7779 ae_state *_state);
7780void minqpsetalgocholesky(minqpstate* state, ae_state *_state);
7781void minqpsetalgobleic(minqpstate* state,
7782 double epsg,
7783 double epsf,
7784 double epsx,
7785 ae_int_t maxits,
7786 ae_state *_state);
7787void minqpsetalgodenseaul(minqpstate* state,
7788 double epsx,
7789 double rho,
7790 ae_int_t itscnt,
7791 ae_state *_state);
7792void minqpsetalgoquickqp(minqpstate* state,
7793 double epsg,
7794 double epsf,
7795 double epsx,
7796 ae_int_t maxouterits,
7797 ae_bool usenewton,
7798 ae_state *_state);
7799void minqpsetbc(minqpstate* state,
7800 /* Real */ ae_vector* bndl,
7801 /* Real */ ae_vector* bndu,
7802 ae_state *_state);
7803void minqpsetlc(minqpstate* state,
7804 /* Real */ ae_matrix* c,
7805 /* Integer */ ae_vector* ct,
7806 ae_int_t k,
7807 ae_state *_state);
7808void minqpsetlcsparse(minqpstate* state,
7809 sparsematrix* c,
7810 /* Integer */ ae_vector* ct,
7811 ae_int_t k,
7812 ae_state *_state);
7813void minqpsetlcmixed(minqpstate* state,
7814 /* Real */ ae_matrix* densec,
7815 /* Integer */ ae_vector* densect,
7816 ae_int_t densek,
7817 sparsematrix* sparsec,
7818 /* Integer */ ae_vector* sparsect,
7819 ae_int_t sparsek,
7820 ae_state *_state);
7821void minqpoptimize(minqpstate* state, ae_state *_state);
7822void minqpresults(minqpstate* state,
7823 /* Real */ ae_vector* x,
7824 minqpreport* rep,
7825 ae_state *_state);
7826void minqpresultsbuf(minqpstate* state,
7827 /* Real */ ae_vector* x,
7828 minqpreport* rep,
7829 ae_state *_state);
7830void minqpsetlineartermfast(minqpstate* state,
7831 /* Real */ ae_vector* b,
7832 ae_state *_state);
7833void minqpsetquadratictermfast(minqpstate* state,
7834 /* Real */ ae_matrix* a,
7835 ae_bool isupper,
7836 double s,
7837 ae_state *_state);
7838void minqprewritediagonal(minqpstate* state,
7839 /* Real */ ae_vector* s,
7840 ae_state *_state);
7841void minqpsetstartingpointfast(minqpstate* state,
7842 /* Real */ ae_vector* x,
7843 ae_state *_state);
7844void minqpsetoriginfast(minqpstate* state,
7845 /* Real */ ae_vector* xorigin,
7846 ae_state *_state);
7847void _minqpstate_init(void* _p, ae_state *_state);
7848void _minqpstate_init_copy(void* _dst, void* _src, ae_state *_state);
7849void _minqpstate_clear(void* _p);
7850void _minqpstate_destroy(void* _p);
7851void _minqpreport_init(void* _p, ae_state *_state);
7852void _minqpreport_init_copy(void* _dst, void* _src, ae_state *_state);
7853void _minqpreport_clear(void* _p);
7854void _minqpreport_destroy(void* _p);
7855void minnlccreate(ae_int_t n,
7856 /* Real */ ae_vector* x,
7857 minnlcstate* state,
7858 ae_state *_state);
7859void minnlccreatef(ae_int_t n,
7860 /* Real */ ae_vector* x,
7861 double diffstep,
7862 minnlcstate* state,
7863 ae_state *_state);
7864void minnlcsetbc(minnlcstate* state,
7865 /* Real */ ae_vector* bndl,
7866 /* Real */ ae_vector* bndu,
7867 ae_state *_state);
7868void minnlcsetlc(minnlcstate* state,
7869 /* Real */ ae_matrix* c,
7870 /* Integer */ ae_vector* ct,
7871 ae_int_t k,
7872 ae_state *_state);
7873void minnlcsetnlc(minnlcstate* state,
7874 ae_int_t nlec,
7875 ae_int_t nlic,
7876 ae_state *_state);
7877void minnlcsetcond(minnlcstate* state,
7878 double epsg,
7879 double epsf,
7880 double epsx,
7881 ae_int_t maxits,
7882 ae_state *_state);
7883void minnlcsetscale(minnlcstate* state,
7884 /* Real */ ae_vector* s,
7885 ae_state *_state);
7886void minnlcsetprecinexact(minnlcstate* state, ae_state *_state);
7887void minnlcsetprecexactlowrank(minnlcstate* state,
7888 ae_int_t updatefreq,
7889 ae_state *_state);
7890void minnlcsetprecexactrobust(minnlcstate* state,
7891 ae_int_t updatefreq,
7892 ae_state *_state);
7893void minnlcsetprecnone(minnlcstate* state, ae_state *_state);
7894void minnlcsetstpmax(minnlcstate* state, double stpmax, ae_state *_state);
7895void minnlcsetalgoaul(minnlcstate* state,
7896 double rho,
7897 ae_int_t itscnt,
7898 ae_state *_state);
7899void minnlcsetxrep(minnlcstate* state, ae_bool needxrep, ae_state *_state);
7900ae_bool minnlciteration(minnlcstate* state, ae_state *_state);
7901void minnlcresults(minnlcstate* state,
7902 /* Real */ ae_vector* x,
7903 minnlcreport* rep,
7904 ae_state *_state);
7905void minnlcresultsbuf(minnlcstate* state,
7906 /* Real */ ae_vector* x,
7907 minnlcreport* rep,
7908 ae_state *_state);
7909void minnlcrestartfrom(minnlcstate* state,
7910 /* Real */ ae_vector* x,
7911 ae_state *_state);
7912void minnlcsetgradientcheck(minnlcstate* state,
7913 double teststep,
7914 ae_state *_state);
7915void minnlcequalitypenaltyfunction(double alpha,
7916 double* f,
7917 double* df,
7918 double* d2f,
7919 ae_state *_state);
7920void minnlcinequalitypenaltyfunction(double alpha,
7921 double stabilizingpoint,
7922 double* f,
7923 double* df,
7924 double* d2f,
7925 ae_state *_state);
7926void minnlcinequalityshiftfunction(double alpha,
7927 double* f,
7928 double* df,
7929 double* d2f,
7930 ae_state *_state);
7931void _minnlcstate_init(void* _p, ae_state *_state);
7932void _minnlcstate_init_copy(void* _dst, void* _src, ae_state *_state);
7933void _minnlcstate_clear(void* _p);
7934void _minnlcstate_destroy(void* _p);
7935void _minnlcreport_init(void* _p, ae_state *_state);
7936void _minnlcreport_init_copy(void* _dst, void* _src, ae_state *_state);
7937void _minnlcreport_clear(void* _p);
7938void _minnlcreport_destroy(void* _p);
7939void minbccreate(ae_int_t n,
7940 /* Real */ ae_vector* x,
7941 minbcstate* state,
7942 ae_state *_state);
7943void minbccreatef(ae_int_t n,
7944 /* Real */ ae_vector* x,
7945 double diffstep,
7946 minbcstate* state,
7947 ae_state *_state);
7948void minbcsetbc(minbcstate* state,
7949 /* Real */ ae_vector* bndl,
7950 /* Real */ ae_vector* bndu,
7951 ae_state *_state);
7952void minbcsetcond(minbcstate* state,
7953 double epsg,
7954 double epsf,
7955 double epsx,
7956 ae_int_t maxits,
7957 ae_state *_state);
7958void minbcsetscale(minbcstate* state,
7959 /* Real */ ae_vector* s,
7960 ae_state *_state);
7961void minbcsetprecdefault(minbcstate* state, ae_state *_state);
7962void minbcsetprecdiag(minbcstate* state,
7963 /* Real */ ae_vector* d,
7964 ae_state *_state);
7965void minbcsetprecscale(minbcstate* state, ae_state *_state);
7966void minbcsetxrep(minbcstate* state, ae_bool needxrep, ae_state *_state);
7967void minbcsetstpmax(minbcstate* state, double stpmax, ae_state *_state);
7968ae_bool minbciteration(minbcstate* state, ae_state *_state);
7969void minbcresults(minbcstate* state,
7970 /* Real */ ae_vector* x,
7971 minbcreport* rep,
7972 ae_state *_state);
7973void minbcresultsbuf(minbcstate* state,
7974 /* Real */ ae_vector* x,
7975 minbcreport* rep,
7976 ae_state *_state);
7977void minbcrestartfrom(minbcstate* state,
7978 /* Real */ ae_vector* x,
7979 ae_state *_state);
7980void minbcrequesttermination(minbcstate* state, ae_state *_state);
7981void minbcsetgradientcheck(minbcstate* state,
7982 double teststep,
7983 ae_state *_state);
7984void _minbcstate_init(void* _p, ae_state *_state);
7985void _minbcstate_init_copy(void* _dst, void* _src, ae_state *_state);
7986void _minbcstate_clear(void* _p);
7987void _minbcstate_destroy(void* _p);
7988void _minbcreport_init(void* _p, ae_state *_state);
7989void _minbcreport_init_copy(void* _dst, void* _src, ae_state *_state);
7990void _minbcreport_clear(void* _p);
7991void _minbcreport_destroy(void* _p);
7992void minnscreate(ae_int_t n,
7993 /* Real */ ae_vector* x,
7994 minnsstate* state,
7995 ae_state *_state);
7996void minnscreatef(ae_int_t n,
7997 /* Real */ ae_vector* x,
7998 double diffstep,
7999 minnsstate* state,
8000 ae_state *_state);
8001void minnssetbc(minnsstate* state,
8002 /* Real */ ae_vector* bndl,
8003 /* Real */ ae_vector* bndu,
8004 ae_state *_state);
8005void minnssetlc(minnsstate* state,
8006 /* Real */ ae_matrix* c,
8007 /* Integer */ ae_vector* ct,
8008 ae_int_t k,
8009 ae_state *_state);
8010void minnssetnlc(minnsstate* state,
8011 ae_int_t nlec,
8012 ae_int_t nlic,
8013 ae_state *_state);
8014void minnssetcond(minnsstate* state,
8015 double epsx,
8016 ae_int_t maxits,
8017 ae_state *_state);
8018void minnssetscale(minnsstate* state,
8019 /* Real */ ae_vector* s,
8020 ae_state *_state);
8021void minnssetalgoags(minnsstate* state,
8022 double radius,
8023 double penalty,
8024 ae_state *_state);
8025void minnssetxrep(minnsstate* state, ae_bool needxrep, ae_state *_state);
8026void minnsrequesttermination(minnsstate* state, ae_state *_state);
8027ae_bool minnsiteration(minnsstate* state, ae_state *_state);
8028void minnsresults(minnsstate* state,
8029 /* Real */ ae_vector* x,
8030 minnsreport* rep,
8031 ae_state *_state);
8032void minnsresultsbuf(minnsstate* state,
8033 /* Real */ ae_vector* x,
8034 minnsreport* rep,
8035 ae_state *_state);
8036void minnsrestartfrom(minnsstate* state,
8037 /* Real */ ae_vector* x,
8038 ae_state *_state);
8039void _minnsqp_init(void* _p, ae_state *_state);
8040void _minnsqp_init_copy(void* _dst, void* _src, ae_state *_state);
8041void _minnsqp_clear(void* _p);
8042void _minnsqp_destroy(void* _p);
8043void _minnsstate_init(void* _p, ae_state *_state);
8044void _minnsstate_init_copy(void* _dst, void* _src, ae_state *_state);
8045void _minnsstate_clear(void* _p);
8046void _minnsstate_destroy(void* _p);
8047void _minnsreport_init(void* _p, ae_state *_state);
8048void _minnsreport_init_copy(void* _dst, void* _src, ae_state *_state);
8049void _minnsreport_clear(void* _p);
8050void _minnsreport_destroy(void* _p);
8051void minlbfgssetdefaultpreconditioner(minlbfgsstate* state,
8052 ae_state *_state);
8053void minlbfgssetcholeskypreconditioner(minlbfgsstate* state,
8054 /* Real */ ae_matrix* p,
8055 ae_bool isupper,
8056 ae_state *_state);
8057void minbleicsetbarrierwidth(minbleicstate* state,
8058 double mu,
8059 ae_state *_state);
8060void minbleicsetbarrierdecay(minbleicstate* state,
8061 double mudecay,
8062 ae_state *_state);
8063void minasacreate(ae_int_t n,
8064 /* Real */ ae_vector* x,
8065 /* Real */ ae_vector* bndl,
8066 /* Real */ ae_vector* bndu,
8067 minasastate* state,
8068 ae_state *_state);
8069void minasasetcond(minasastate* state,
8070 double epsg,
8071 double epsf,
8072 double epsx,
8073 ae_int_t maxits,
8074 ae_state *_state);
8075void minasasetxrep(minasastate* state, ae_bool needxrep, ae_state *_state);
8076void minasasetalgorithm(minasastate* state,
8077 ae_int_t algotype,
8078 ae_state *_state);
8079void minasasetstpmax(minasastate* state, double stpmax, ae_state *_state);
8080ae_bool minasaiteration(minasastate* state, ae_state *_state);
8081void minasaresults(minasastate* state,
8082 /* Real */ ae_vector* x,
8083 minasareport* rep,
8084 ae_state *_state);
8085void minasaresultsbuf(minasastate* state,
8086 /* Real */ ae_vector* x,
8087 minasareport* rep,
8088 ae_state *_state);
8089void minasarestartfrom(minasastate* state,
8090 /* Real */ ae_vector* x,
8091 /* Real */ ae_vector* bndl,
8092 /* Real */ ae_vector* bndu,
8093 ae_state *_state);
8094void _minasastate_init(void* _p, ae_state *_state);
8095void _minasastate_init_copy(void* _dst, void* _src, ae_state *_state);
8096void _minasastate_clear(void* _p);
8097void _minasastate_destroy(void* _p);
8098void _minasareport_init(void* _p, ae_state *_state);
8099void _minasareport_init_copy(void* _dst, void* _src, ae_state *_state);
8100void _minasareport_clear(void* _p);
8101void _minasareport_destroy(void* _p);
8102void minlmcreatevj(ae_int_t n,
8103 ae_int_t m,
8104 /* Real */ ae_vector* x,
8105 minlmstate* state,
8106 ae_state *_state);
8107void minlmcreatev(ae_int_t n,
8108 ae_int_t m,
8109 /* Real */ ae_vector* x,
8110 double diffstep,
8111 minlmstate* state,
8112 ae_state *_state);
8113void minlmcreatefgh(ae_int_t n,
8114 /* Real */ ae_vector* x,
8115 minlmstate* state,
8116 ae_state *_state);
8117void minlmsetcond(minlmstate* state,
8118 double epsx,
8119 ae_int_t maxits,
8120 ae_state *_state);
8121void minlmsetxrep(minlmstate* state, ae_bool needxrep, ae_state *_state);
8122void minlmsetstpmax(minlmstate* state, double stpmax, ae_state *_state);
8123void minlmsetscale(minlmstate* state,
8124 /* Real */ ae_vector* s,
8125 ae_state *_state);
8126void minlmsetbc(minlmstate* state,
8127 /* Real */ ae_vector* bndl,
8128 /* Real */ ae_vector* bndu,
8129 ae_state *_state);
8130void minlmsetlc(minlmstate* state,
8131 /* Real */ ae_matrix* c,
8132 /* Integer */ ae_vector* ct,
8133 ae_int_t k,
8134 ae_state *_state);
8135void minlmsetacctype(minlmstate* state,
8136 ae_int_t acctype,
8137 ae_state *_state);
8138ae_bool minlmiteration(minlmstate* state, ae_state *_state);
8139void minlmresults(minlmstate* state,
8140 /* Real */ ae_vector* x,
8141 minlmreport* rep,
8142 ae_state *_state);
8143void minlmresultsbuf(minlmstate* state,
8144 /* Real */ ae_vector* x,
8145 minlmreport* rep,
8146 ae_state *_state);
8147void minlmrestartfrom(minlmstate* state,
8148 /* Real */ ae_vector* x,
8149 ae_state *_state);
8150void minlmrequesttermination(minlmstate* state, ae_state *_state);
8151void minlmcreatevgj(ae_int_t n,
8152 ae_int_t m,
8153 /* Real */ ae_vector* x,
8154 minlmstate* state,
8155 ae_state *_state);
8156void minlmcreatefgj(ae_int_t n,
8157 ae_int_t m,
8158 /* Real */ ae_vector* x,
8159 minlmstate* state,
8160 ae_state *_state);
8161void minlmcreatefj(ae_int_t n,
8162 ae_int_t m,
8163 /* Real */ ae_vector* x,
8164 minlmstate* state,
8165 ae_state *_state);
8166void minlmsetgradientcheck(minlmstate* state,
8167 double teststep,
8168 ae_state *_state);
8169void _minlmstepfinder_init(void* _p, ae_state *_state);
8170void _minlmstepfinder_init_copy(void* _dst, void* _src, ae_state *_state);
8171void _minlmstepfinder_clear(void* _p);
8172void _minlmstepfinder_destroy(void* _p);
8173void _minlmstate_init(void* _p, ae_state *_state);
8174void _minlmstate_init_copy(void* _dst, void* _src, ae_state *_state);
8175void _minlmstate_clear(void* _p);
8176void _minlmstate_destroy(void* _p);
8177void _minlmreport_init(void* _p, ae_state *_state);
8178void _minlmreport_init_copy(void* _dst, void* _src, ae_state *_state);
8179void _minlmreport_clear(void* _p);
8180void _minlmreport_destroy(void* _p);
8181
8182}
8183#endif
8184