toolboxes/FullBNT-1.0.7/netlab3.3/scg.m @ changeset 0:e9a9cd732c1e (tip)
author: wolffd
date: Tue, 10 Feb 2015 15:05:51 +0000
commit message: first hg version after svn
function [x, options, flog, pointlog, scalelog] = scg(f, x, options, gradf, varargin)
%SCG Scaled conjugate gradient optimization.
%
% Description
% [X, OPTIONS] = SCG(F, X, OPTIONS, GRADF) uses a scaled conjugate
% gradients algorithm to find a local minimum of the function F(X)
% whose gradient is given by GRADF(X). Here X is a row vector and F
% returns a scalar value. The point at which F has a local minimum is
% returned as X. The function value at that point is returned in
% OPTIONS(8).
%
% [X, OPTIONS, FLOG, POINTLOG, SCALELOG] = SCG(F, X, OPTIONS, GRADF)
% also returns (optionally) a log of the function values after each
% cycle in FLOG, a log of the points visited in POINTLOG, and a log of
% the scale values in the algorithm in SCALELOG.
%
% SCG(F, X, OPTIONS, GRADF, P1, P2, ...) allows additional arguments to
% be passed to F() and GRADF(). The optional parameters have the
% following interpretations.
%
% OPTIONS(1) is set to 1 to display error values; also logs the error
% values in the return argument FLOG, and the points visited in the
% return argument POINTLOG. If OPTIONS(1) is set to 0, then only
% warning messages are displayed. If OPTIONS(1) is -1, then nothing is
% displayed.
%
% OPTIONS(2) is a measure of the absolute precision required for the
% value of X at the solution. If the absolute difference between the
% values of X between two successive steps is less than OPTIONS(2),
% then this condition is satisfied.
%
% OPTIONS(3) is a measure of the precision required of the objective
% function at the solution. If the absolute difference between the
% objective function values between two successive steps is less than
% OPTIONS(3), then this condition is satisfied. Both this and the
% previous condition must be satisfied for termination.
%
% OPTIONS(9) is set to 1 to check the user defined gradient function.
%
% OPTIONS(10) returns the total number of function evaluations
% (including those in any line searches).
%
% OPTIONS(11) returns the total number of gradient evaluations.
%
% OPTIONS(14) is the maximum number of iterations; default 100.
%
% See also
% CONJGRAD, QUASINEW
%
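% Example
% A minimal usage sketch, assuming the ROSEN and ROSEGRAD test
% functions supplied with Netlab are on the path:
%
%    options = zeros(1, 18);   % options vector (FOPTIONS layout)
%    options(1) = 1;           % display error values each cycle
%    options(2) = 1e-4;        % precision required of X
%    options(3) = 1e-4;        % precision required of F
%    options(14) = 100;        % maximum of 100 iterations
%    x = [-1 1];               % starting point (row vector)
%    [x, options] = scg('rosen', x, options, 'rosegrad');
%    fmin = options(8);        % function value at the minimum found
%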

% Copyright (c) Ian T Nabney (1996-2001)

% Set up the options.
if length(options) < 18
  error('Options vector too short')
end

if (options(14))
  niters = options(14);
else
  niters = 100;
end

display = options(1);
gradcheck = options(9);

% Set up strings for evaluating function and gradient
f = fcnchk(f, length(varargin));
gradf = fcnchk(gradf, length(varargin));

nparams = length(x);

% Check gradients
if (gradcheck)
  feval('gradchek', x, f, gradf, varargin{:});
end

sigma0 = 1.0e-4;
fold = feval(f, x, varargin{:});          % Initial function value.
fnow = fold;
options(10) = options(10) + 1;            % Increment function evaluation counter.
gradnew = feval(gradf, x, varargin{:});   % Initial gradient.
gradold = gradnew;
options(11) = options(11) + 1;            % Increment gradient evaluation counter.
d = -gradnew;                             % Initial search direction.
success = 1;                              % Force calculation of directional derivs.
nsuccess = 0;                             % nsuccess counts number of successes.
beta = 1.0;                               % Initial scale parameter.
betamin = 1.0e-15;                        % Lower bound on scale.
betamax = 1.0e100;                        % Upper bound on scale.
j = 1;                                    % j counts number of iterations.
if nargout >= 3
  flog(j, :) = fold;
  if nargout >= 4
    pointlog(j, :) = x;
  end
end

% Main optimization loop.
while (j <= niters)

  % Calculate first and second directional derivatives.
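  % Here mu is the slope d*g' of f along the search direction d, kappa is
  % the squared norm of d, and theta approximates the curvature d*H*d' by
  % differencing the gradients at x and x + sigma*d, so the Hessian H is
  % never formed explicitly.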
  if (success == 1)
    mu = d*gradnew';
    if (mu >= 0)
      d = - gradnew;
      mu = d*gradnew';
    end
    kappa = d*d';
    if kappa < eps
      options(8) = fnow;
      return
    end
    sigma = sigma0/sqrt(kappa);
    xplus = x + sigma*d;
    gplus = feval(gradf, xplus, varargin{:});
    options(11) = options(11) + 1;
    theta = (d*(gplus' - gradnew'))/sigma;
  end

  % Increase effective curvature and evaluate step size alpha.
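  % The scale beta acts as a Levenberg-Marquardt style shift: if the raw
  % curvature theta + beta*kappa is not positive, delta is reset to the
  % positive value beta*kappa and beta is raised so that subsequent
  % iterations see a positive effective curvature. The step alpha = -mu/delta
  % then minimises the local quadratic model along d.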
  delta = theta + beta*kappa;
  if (delta <= 0)
    delta = beta*kappa;
    beta = beta - theta/kappa;
  end
  alpha = - mu/delta;

  % Calculate the comparison ratio.
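  % Delta is the ratio of the actual reduction fnew - fold to the reduction
  % alpha*mu/2 predicted by the local quadratic model; Delta >= 0 means the
  % function value did not increase, so the step is accepted.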
  xnew = x + alpha*d;
  fnew = feval(f, xnew, varargin{:});
  options(10) = options(10) + 1;
  Delta = 2*(fnew - fold)/(alpha*mu);
  if (Delta >= 0)
    success = 1;
    nsuccess = nsuccess + 1;
    x = xnew;
    fnow = fnew;
  else
    success = 0;
    fnow = fold;
  end

  if nargout >= 3
    % Store relevant variables
    flog(j) = fnow;              % Current function value
    if nargout >= 4
      pointlog(j,:) = x;         % Current position
      if nargout >= 5
        scalelog(j) = beta;      % Current scale parameter
      end
    end
  end
  if display > 0
    fprintf(1, 'Cycle %4d Error %11.6f Scale %e\n', j, fnow, beta);
  end

  if (success == 1)
    % Test for termination

    if (max(abs(alpha*d)) < options(2) && max(abs(fnew-fold)) < options(3))
      options(8) = fnew;
      return;

    else
      % Update variables for new position
      fold = fnew;
      gradold = gradnew;
      gradnew = feval(gradf, x, varargin{:});
      options(11) = options(11) + 1;
      % If the gradient is zero then we are done.
      if (gradnew*gradnew' == 0)
        options(8) = fnew;
        return;
      end
    end
  end

  % Adjust beta according to comparison ratio.
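  % A small ratio (Delta < 0.25) means the quadratic model fitted poorly,
  % so the scale is increased, giving shorter, more gradient-like steps; a
  % large ratio (Delta > 0.75) means it fitted well, so the scale is reduced
  % to allow longer steps. beta is kept within [betamin, betamax].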
  if (Delta < 0.25)
    beta = min(4.0*beta, betamax);
  end
  if (Delta > 0.75)
    beta = max(0.5*beta, betamin);
  end

  % Update search direction using Polak-Ribiere formula, or re-start
  % in direction of negative gradient after nparams steps.
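  % Restarting along the negative gradient every nparams successful steps
  % is the usual cure for the loss of conjugacy on non-quadratic functions.
  % Note that gamma is normalised by mu = d*g' rather than by the squared
  % norm of the old gradient as in the textbook Polak-Ribiere formula; this
  % matches the update used in Moller's original SCG formulation.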
  if (nsuccess == nparams)
    d = -gradnew;
    nsuccess = 0;
  else
    if (success == 1)
      gamma = (gradold - gradnew)*gradnew'/(mu);
      d = gamma*d - gradnew;
    end
  end
  j = j + 1;
end

% If we get here, then we haven't terminated in the given number of
% iterations.

options(8) = fold;
if (options(1) >= 0)
  disp(maxitmess);   % maxitmess is a Netlab helper returning the standard warning string.
end
208