annotate toolboxes/FullBNT-1.0.7/netlab3.3/quasinew.m @ 0:e9a9cd732c1e tip

first hg version after svn
author wolffd
date Tue, 10 Feb 2015 15:05:51 +0000
parents
children
rev   line source
function [x, options, flog, pointlog] = quasinew(f, x, options, gradf, ...
                                        varargin)
%QUASINEW Quasi-Newton optimization.
%
%	Description
%	[X, OPTIONS, FLOG, POINTLOG] = QUASINEW(F, X, OPTIONS, GRADF) uses a
%	quasi-Newton algorithm to find a local minimum of the function F(X)
%	whose gradient is given by GRADF(X).  Here X is a row vector and F
%	returns a scalar value.  The point at which F has a local minimum is
%	returned as X.  The function value at that point is returned in
%	OPTIONS(8).  A log of the function values after each cycle is
%	(optionally) returned in FLOG, and a log of the points visited is
%	(optionally) returned in POINTLOG.
%
%	QUASINEW(F, X, OPTIONS, GRADF, P1, P2, ...) allows  additional
%	arguments to be passed to F() and GRADF().
%
%	The optional parameters have the following interpretations.
%
%	OPTIONS(1) is set to 1 to display error values; also logs error
%	values in the return argument ERRLOG, and the points visited in the
%	return argument POINTSLOG.  If OPTIONS(1) is set to 0, then only
%	warning messages are displayed.  If OPTIONS(1) is -1, then nothing is
%	displayed.
%
%	OPTIONS(2) is a measure of the absolute precision required for the
%	value of X at the solution.  If the absolute difference between the
%	values of X between two successive steps is less than OPTIONS(2),
%	then this condition is satisfied.
%
%	OPTIONS(3) is a measure of the precision required of the objective
%	function at the solution.  If the absolute difference between the
%	objective function values between two successive steps is less than
%	OPTIONS(3), then this condition is satisfied.  Both this and the
%	previous condition must be satisfied for termination.
%
%	OPTIONS(9) should be set to 1 to check the user defined gradient
%	function.
%
%	OPTIONS(10) returns the total number of function evaluations
%	(including those in any line searches).
%
%	OPTIONS(11) returns the total number of gradient evaluations.
%
%	OPTIONS(14) is the maximum number of iterations; default 100.
%
%	OPTIONS(15) is the precision in parameter space of the line search;
%	default 1E-2.
%
%	See also
%	CONJGRAD, GRADDESC, LINEMIN, MINBRACK, SCG
%

%	Copyright (c) Ian T Nabney (1996-2001)

% Set up the options.
if length(options) < 18
  error('Options vector too short')
end

% OPTIONS(14): maximum number of iterations (0 means "use the default").
if (options(14))
  niters = options(14);
else
  niters = 100;
end

% Set up options for line search
line_options = foptions;
% Don't need a very precise line search: the quasi-Newton direction is
% usually close to a full Newton step, so a loose tolerance suffices.
if options(15) > 0
  line_options(2) = options(15);
else
  line_options(2) = 1e-2;	% Default
end
% Minimal fractional change in f from Newton step: otherwise do a line search
min_frac_change = 1e-4;

display = options(1);

% Next two lines allow quasinew to work with expression strings
f = fcnchk(f, length(varargin));
gradf = fcnchk(gradf, length(varargin));

% Check gradients against finite differences if requested (OPTIONS(9)).
if (options(9))
  feval('gradchek', x, f, gradf, varargin{:});
end

nparams = length(x);
fnew = feval(f, x, varargin{:});
options(10) = options(10) + 1;
gradnew = feval(gradf, x, varargin{:});
options(11) = options(11) + 1;
p = -gradnew;			% Search direction
hessinv = eye(nparams);		% Initialise inverse Hessian to be identity matrix
j = 1;
% Log the starting point before the first cycle.
if nargout >= 3
  flog(j, :) = fnew;
  if nargout == 4
    pointlog(j, :) = x;
  end
end

while (j <= niters)

  xold = x;
  fold = fnew;
  gradold = gradnew;

  % Take the full quasi-Newton step first; it is only shortened by a
  % line search if it fails the sufficient-decrease test below.
  x = xold + p;
  fnew = feval(f, x, varargin{:});
  options(10) = options(10) + 1;

  % This shouldn't occur, but rest of code depends on sd being downhill.
  % (gradnew still holds the gradient at xold at this point, so
  % gradnew*p' is the directional derivative along p at xold.)
  if (gradnew*p' >= 0)
    p = -p;
    if options(1) >= 0
      warning('search direction uphill in quasinew');
    end
  end

  % Does the Newton step reduce the function value sufficiently?
  if (fnew >= fold + min_frac_change * (gradnew*p'))
    % No it doesn't
    % Minimize along current search direction: must be less than Newton step
    [lmin, line_options] = feval('linemin', f, xold, p, fold, ...
      line_options, varargin{:});
    options(10) = options(10) + line_options(10);
    options(11) = options(11) + line_options(11);
    % Correct x and fnew to be the actual search point we have found
    x = xold + lmin * p;
    p = x - xold;
    fnew = line_options(8);
  end

  % Check for termination: both the step in parameter space and the
  % change in function value must be below tolerance.
  if (max(abs(x - xold)) < options(2) & max(abs(fnew - fold)) < options(3))
    options(8) = fnew;
    return;
  end
  gradnew = feval(gradf, x, varargin{:});
  options(11) = options(11) + 1;
  v = gradnew - gradold;
  vdotp = v*p';

  % Skip update to inverse Hessian if fac not sufficiently positive:
  % a near-zero curvature v'p would make the BFGS update ill-conditioned.
  if (vdotp*vdotp > eps*sum(v.^2)*sum(p.^2))
    Gv = (hessinv*v')';
    vGv = sum(v.*Gv);
    u = p./vdotp - Gv./vGv;
    % Use BFGS update rule
    hessinv = hessinv + (p'*p)/vdotp - (Gv'*Gv)/vGv + vGv*(u'*u);
  end

  % New search direction from the updated inverse Hessian approximation.
  p = -(hessinv * gradnew')';

  if (display > 0)
    fprintf(1, 'Cycle %4d  Function %11.6f\n', j, fnew);
  end

  j = j + 1;
  if nargout >= 3
    flog(j, :) = fnew;
    if nargout == 4
      pointlog(j, :) = x;
    end
  end
end

% If we get here, then we haven't terminated in the given number of
% iterations.

% fnew is the function value at the final x (either from the Newton step
% or from the line search), so it is the value consistent with the
% returned X; the previous code returned fold, the value at the
% penultimate point.
options(8) = fnew;
if (options(1) >= 0)
  disp(maxitmess);
end