function [x, options, flog, pointlog] = graddesc(f, x, options, gradf, ...
                                                 varargin)
%GRADDESC Gradient descent optimization.
%
% Description
% [X, OPTIONS, FLOG, POINTLOG] = GRADDESC(F, X, OPTIONS, GRADF) uses
% batch gradient descent to find a local minimum of the function F(X)
% whose gradient is given by GRADF(X). A log of the function values
% after each cycle is (optionally) returned in FLOG, and a log of the
% points visited is (optionally) returned in POINTLOG.
%
% Note that X is a row vector and F returns a scalar value. The point
% at which F has a local minimum is returned as X. The function value
% at that point is returned in OPTIONS(8).
%
% GRADDESC(F, X, OPTIONS, GRADF, P1, P2, ...) allows additional
% arguments to be passed to F() and GRADF().
%
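% For example, a minimal usage sketch on a simple quadratic (function
% handles are assumed here for brevity; string function names work
% equally well, since both are resolved through FCNCHK):
%
%   f       = @(x) sum(x.^2);  % objective: scalar value at row vector x
%   gradf   = @(x) 2*x;        % gradient of the objective
%   options = foptions;        % default options vector
%   options(14) = 50;          % at most 50 iterations
%   [xmin, options] = graddesc(f, [2 3], options, gradf);
%   fmin = options(8);         % function value at the solution found
%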
% The optional parameters have the following interpretations.
%
% OPTIONS(1) is set to 1 to display error values; this also logs the
% error values in the return argument FLOG, and the points visited in
% the return argument POINTLOG. If OPTIONS(1) is set to 0, then only
% warning messages are displayed. If OPTIONS(1) is -1, then nothing is
% displayed.
%
% OPTIONS(2) is the absolute precision required for the value of X at
% the solution. If the absolute difference between the values of X
% between two successive steps is less than OPTIONS(2), then this
% condition is satisfied.
%
% OPTIONS(3) is a measure of the precision required of the objective
% function at the solution. If the absolute difference between the
% objective function values between two successive steps is less than
% OPTIONS(3), then this condition is satisfied. Both this and the
% previous condition must be satisfied for termination.
%
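% For example (illustrative tolerances only):
%
%   options(2) = 1e-4;   % tolerance on the change in X
%   options(3) = 1e-4;   % tolerance on the change in F
%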
% OPTIONS(7) determines the line minimisation method used. If it is
% set to 1 then a line minimiser is used (in the direction of the
% negative gradient). If it is 0 (the default), then each parameter
% update is a fixed multiple (the learning rate) of the negative
% gradient added to a fixed multiple (the momentum) of the previous
% parameter update.
%
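% For example, to select the line minimiser with a coarser line search
% precision (a sketch; the values are illustrative only):
%
%   options(7)  = 1;     % line minimisation along the negative gradient
%   options(15) = 1e-3;  % precision of the line search
%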
% OPTIONS(9) should be set to 1 to check the user defined gradient
% function GRADF with GRADCHEK. This is carried out at the initial
% parameter vector X.
%
% OPTIONS(10) returns the total number of function evaluations
% (including those in any line searches).
%
% OPTIONS(11) returns the total number of gradient evaluations.
%
% OPTIONS(14) is the maximum number of iterations; default 100.
%
% OPTIONS(15) is the precision in parameter space of the line search;
% default FOPTIONS(2).
%
% OPTIONS(17) is the momentum; default 0.5. As a multiple of the
% previous parameter update, it needs no scaling by the data set size.
%
% OPTIONS(18) is the learning rate; default 0.01. It should be scaled
% by the inverse of the number of data points.
%
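% For instance, if the error is summed over NDATA data points (NDATA is
% a placeholder for the size of your data set), a suitably scaled
% learning rate might be:
%
%   options(18) = 0.01/ndata;   % learning rate scaled by 1/ndata
%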
% See also
% CONJGRAD, LINEMIN, OLGD, MINBRACK, QUASINEW, SCG
%

% Copyright (c) Ian T Nabney (1996-2001)

% Set up the options.
if length(options) < 18
  error('Options vector too short')
end

if (options(14))
  niters = options(14);
else
  niters = 100;
end

line_min_flag = 0; % Flag for line minimisation option
if (round(options(7)) == 1)
  % Use line minimisation
  line_min_flag = 1;
  % Set options for line minimiser
  line_options = foptions;
  if options(15) > 0
    line_options(2) = options(15);
  end
else
  % Learning rate: must be positive
  if (options(18) > 0)
    eta = options(18);
  else
    eta = 0.01;
  end
  % Momentum term: allow zero momentum
  if (options(17) >= 0)
    mu = options(17);
  else
    mu = 0.5;
  end
end

% Check function string
f = fcnchk(f, length(varargin));
gradf = fcnchk(gradf, length(varargin));

% Display information if options(1) > 0
display = options(1) > 0;

% Work out whether we need to compute f at each iteration: this is
% needed if using a line search, if displaying results, or if the
% termination criterion requires it.
fcneval = (options(7) | display | options(3));

% Check gradients
if (options(9) > 0)
  feval('gradchek', x, f, gradf, varargin{:});
end

dxold = zeros(1, size(x, 2));
xold = x;
fold = 0; % Must be initialised so that termination test can be performed
if fcneval
  fnew = feval(f, x, varargin{:});
  options(10) = options(10) + 1;
  fold = fnew;
end

% Main optimization loop.
for j = 1:niters
  xold = x;
  grad = feval(gradf, x, varargin{:});
  options(11) = options(11) + 1; % Increment gradient evaluation counter
  if (line_min_flag ~= 1)
    % Fixed-step update: learning rate times the negative gradient,
    % plus momentum times the previous update.
    dx = mu*dxold - eta*grad;
    x = x + dx;
    dxold = dx;
    if fcneval
      fold = fnew;
      fnew = feval(f, x, varargin{:});
      options(10) = options(10) + 1;
    end
  else
    % Line search along the new search direction: the negative
    % gradient, normalised to unit length.
    sd = -grad./norm(grad);
    fold = fnew;
    [lmin, line_options] = feval('linemin', f, x, sd, fold, ...
      line_options, varargin{:});
    options(10) = options(10) + line_options(10);
    x = xold + lmin*sd;
    fnew = line_options(8);
  end
  if nargout >= 3
    flog(j) = fnew;
    if nargout >= 4
      pointlog(j, :) = x;
    end
  end
  if display
    fprintf(1, 'Cycle %5d Function %11.8f\n', j, fnew);
  end
  if (max(abs(x - xold)) < options(2) & abs(fnew - fold) < options(3))
    % Termination criteria are met
    options(8) = fnew;
    return;
  end
end

if fcneval
  options(8) = fnew;
else
  options(8) = feval(f, x, varargin{:});
  options(10) = options(10) + 1;
end
if (options(1) >= 0)
  disp(maxitmess);
end