comparison toolboxes/FullBNT-1.0.7/netlab3.3/conjgrad.m @ 0:e9a9cd732c1e tip

first hg version after svn
author wolffd
date Tue, 10 Feb 2015 15:05:51 +0000
function [x, options, flog, pointlog] = conjgrad(f, x, options, gradf, ...
    varargin)
%CONJGRAD Conjugate gradients optimization.
%
% Description
% [X, OPTIONS, FLOG, POINTLOG] = CONJGRAD(F, X, OPTIONS, GRADF) uses a
% conjugate gradients algorithm to find the minimum of the function
% F(X) whose gradient is given by GRADF(X). Here X is a row vector and
% F returns a scalar value. The point at which F has a local minimum
% is returned as X. The function value at that point is returned in
% OPTIONS(8). A log of the function values after each cycle is
% (optionally) returned in FLOG, and a log of the points visited is
% (optionally) returned in POINTLOG.
%
% CONJGRAD(F, X, OPTIONS, GRADF, P1, P2, ...) allows additional
% arguments to be passed to F() and GRADF().
%
% The optional parameters have the following interpretations.
%
% OPTIONS(1) is set to 1 to display error values; this also logs the
% error values in the return argument FLOG, and the points visited in
% the return argument POINTLOG. If OPTIONS(1) is set to 0, then only
% warning messages are displayed. If OPTIONS(1) is -1, then nothing is
% displayed.
%
% OPTIONS(2) is a measure of the absolute precision required for the
% value of X at the solution. If the absolute difference between the
% values of X on two successive steps is less than OPTIONS(2), then
% this condition is satisfied.
%
% OPTIONS(3) is a measure of the precision required of the objective
% function at the solution. If the absolute difference between the
% objective function values on two successive steps is less than
% OPTIONS(3), then this condition is satisfied. Both this and the
% previous condition must be satisfied for termination.
%
% OPTIONS(9) is set to 1 to check the user defined gradient function.
%
% OPTIONS(10) returns the total number of function evaluations
% (including those in any line searches).
%
% OPTIONS(11) returns the total number of gradient evaluations.
%
% OPTIONS(14) is the maximum number of iterations; default 100.
%
% OPTIONS(15) is the precision in parameter space of the line search;
% default 1E-4.
%
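% Example
% An illustrative sketch: the function handles, option settings and
% starting point below are chosen purely for demonstration and are not
% prescribed by the NETLAB interface itself.
%
%   f     = @(x) sum(x.^2);      % objective: row vector in, scalar out
%   gradf = @(x) 2*x;            % its gradient: row vector like x
%   opts  = zeros(1, 18);        % options vector must have length >= 18
%   opts(1)  = 1;                % print the function value each cycle
%   opts(2)  = 1e-6;             % precision required in X
%   opts(3)  = 1e-8;             % precision required in F(X)
%   opts(14) = 50;               % allow at most 50 cycles
%   [xmin, opts] = conjgrad(f, [1 -2 3], opts, gradf);
%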
% See also
% GRADDESC, LINEMIN, MINBRACK, QUASINEW, SCG
%

% Copyright (c) Ian T Nabney (1996-2001)

% Set up the options.
if length(options) < 18
  error('Options vector too short')
end

if (options(14))
  niters = options(14);
else
  niters = 100;
end

% Set up options for line search
line_options = foptions;
% Need a precise line search for success
if options(15) > 0
  line_options(2) = options(15);
else
  line_options(2) = 1e-4;
end

display = options(1);

% Next two lines allow conjgrad to work with expression strings
f = fcnchk(f, length(varargin));
gradf = fcnchk(gradf, length(varargin));

% Check gradients
if (options(9))
  feval('gradchek', x, f, gradf, varargin{:});
end

options(10) = 0;
options(11) = 0;
nparams = length(x);
fnew = feval(f, x, varargin{:});
options(10) = options(10) + 1;
gradnew = feval(gradf, x, varargin{:});
options(11) = options(11) + 1;
d = -gradnew;  % Initial search direction
br_min = 0;
br_max = 1.0;  % Initial value for maximum distance to search along
tol = sqrt(eps);

j = 1;
if nargout >= 3
  flog(j, :) = fnew;
  if nargout == 4
    pointlog(j, :) = x;
  end
end

while (j <= niters)

  xold = x;
  fold = fnew;
  gradold = gradnew;

  gg = gradold*gradold';
  if (gg == 0.0)
    % If the gradient is zero then we are done.
    options(8) = fnew;
    return;
  end

  % This shouldn't occur, but the rest of the code depends on d being downhill
  if (gradnew*d' > 0)
    d = -d;
    if options(1) >= 0
      warning('search direction uphill in conjgrad');
    end
  end

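  % Line search: normalise d to get line_sd, then linemin minimises f
  % along that direction starting from xold, returning the step length
  % lmin; line_options(8) holds the function value at the point found.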
  line_sd = d./norm(d);
  [lmin, line_options] = feval('linemin', f, xold, line_sd, fold, ...
    line_options, varargin{:});
  options(10) = options(10) + line_options(10);
  options(11) = options(11) + line_options(11);
  % Set x and fnew to be the actual search point we have found
  x = xold + lmin * line_sd;
  fnew = line_options(8);

  % Check for termination
  if (max(abs(x - xold)) < options(2) & max(abs(fnew - fold)) < options(3))
    options(8) = fnew;
    return;
  end

  gradnew = feval(gradf, x, varargin{:});
  options(11) = options(11) + 1;

  % Use Polak-Ribiere formula to update search direction
  gamma = ((gradnew - gradold)*(gradnew)')/gg;
  d = (d .* gamma) - gradnew;
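  % In symbols, with g = gradnew and g0 = gradold, the coefficient above
  % is gamma = (g - g0)*g'/(g0*g0'), and the update sets d to
  % gamma*d - g: the steepest descent direction plus a gamma-weighted
  % multiple of the previous search direction.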

  if (display > 0)
    fprintf(1, 'Cycle %4d Function %11.6f\n', j, line_options(8));
  end

  j = j + 1;
  if nargout >= 3
    flog(j, :) = fnew;
    if nargout == 4
      pointlog(j, :) = x;
    end
  end
end

% If we get here, then we haven't terminated in the given number of
% iterations.

options(8) = fold;
if (options(1) >= 0)
  disp(maxitmess);
end