function [net, options, errlog, pointlog] = olgd(net, options, x, t)
%OLGD On-line gradient descent optimization.
%
% Description
% [NET, OPTIONS, ERRLOG, POINTLOG] = OLGD(NET, OPTIONS, X, T) uses on-
% line gradient descent to find a local minimum of the error function
% for the network NET computed on the input data X and target values T.
% A log of the error values after each cycle is (optionally) returned
% in ERRLOG, and a log of the points visited is (optionally) returned
% in POINTLOG. Because the gradient is computed on-line (i.e. after
% each pattern) this can be quite inefficient in Matlab.
%
% The error function value at the final weight vector is returned in
% OPTIONS(8).
%
% The optional parameters have the following interpretations.
%
% OPTIONS(1) is set to 1 to display error values; this also logs the
% error values in the return argument ERRLOG, and the points visited
% in the return argument POINTLOG. If OPTIONS(1) is set to 0, then
% only warning messages are displayed. If OPTIONS(1) is -1, then
% nothing is displayed.
%
% OPTIONS(2) is the precision required for the weight values at the
% solution. If the absolute difference between the weight vectors on
% two successive cycles is less than OPTIONS(2), then this condition
% is satisfied.
%
% OPTIONS(3) is the precision required of the objective function at
% the solution. If the absolute difference between the error function
% values on two successive cycles is less than OPTIONS(3), then this
% condition is satisfied. Both this and the previous condition must be
% satisfied for termination. Note that testing the function value at
% each iteration roughly halves the speed of the algorithm.
%
% OPTIONS(5) determines how the patterns are sampled. If it is 1, they
% are sampled randomly with replacement; if it is 0 (the default),
% they are presented in order.
%
% OPTIONS(6) determines whether the learning rate decays. If it is 1,
% the learning rate decays as 1/T, where T is the total number of
% pattern presentations so far; if it is 0 (the default), the learning
% rate is constant.
%
% OPTIONS(9) should be set to 1 to check the user defined gradient
% function.
%
% OPTIONS(10) returns the total number of function evaluations
% (including those in any line searches).
%
% OPTIONS(11) returns the total number of gradient evaluations.
%
% OPTIONS(14) is the maximum number of iterations (passes through the
% complete pattern set); default 100.
%
% OPTIONS(17) is the momentum; default 0.5.
%
% OPTIONS(18) is the learning rate; default 0.01.
%
% See also
% GRADDESC
%

% Copyright (c) Ian T Nabney (1996-2001)

% Set up the options.
if length(options) < 18
  error('Options vector too short')
end

if (options(14))
  niters = options(14);
else
  niters = 100;
end

% Learning rate: must be positive
if (options(18) > 0)
  eta = options(18);
else
  eta = 0.01;
end
% Save initial learning rate for annealing
lr = eta;
% Momentum term: allow zero momentum
if (options(17) >= 0)
  mu = options(17);
else
  mu = 0.5;
end

pakstr = [net.type, 'pak'];
unpakstr = [net.type, 'unpak'];

% Extract initial weights from the network
w = feval(pakstr, net);

display = options(1);

% Work out if we need to compute f at each iteration: needed if we
% display results or if the termination criterion requires it.  (A
% display value of -1 means nothing is shown, so it does not force an
% evaluation.)
fcneval = (display > 0 || options(3));

% Check gradients
if (options(9))
  feval('gradchek', w, 'neterr', 'netgrad', net, x, t);
end

dwold = zeros(1, length(w));
% Initialise the error values so that the termination test and error
% logging can be performed before the first function evaluation
fold = 0;
fnew = 0;
ndata = size(x, 1);

if fcneval
  fnew = neterr(w, net, x, t);
  options(10) = options(10) + 1;
  fold = fnew;
end

j = 1;
if nargout >= 3
  errlog(j, :) = fnew;
  if nargout == 4
    pointlog(j, :) = w;
  end
end

% Main optimization loop.
while j <= niters
  wold = w;
  if options(5)
    % Randomise order of pattern presentation: with replacement
    pnum = ceil(rand(ndata, 1).*ndata);
  else
    pnum = 1:ndata;
  end
  for k = 1:ndata
    grad = netgrad(w, net, x(pnum(k),:), t(pnum(k),:));
    if options(6)
      % Let the learning rate decrease as 1/t
      lr = eta/((j-1)*ndata + k);
    end
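    % Weight update with momentum: dw(t) = mu*dw(t-1) - lr*grad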
    dw = mu*dwold - lr*grad;
    w = w + dw;
    dwold = dw;
  end
  % Count gradient evaluations once per pass through the data
  options(11) = options(11) + 1;
  if fcneval
    fold = fnew;
    fnew = neterr(w, net, x, t);
    options(10) = options(10) + 1;
  end
  if display > 0
    fprintf(1, 'Iteration %5d Error %11.8f\n', j, fnew);
  end
  j = j + 1;
  if nargout >= 3
    errlog(j) = fnew;
    if nargout == 4
      pointlog(j, :) = w;
    end
  end
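  % Both the weight-change test (OPTIONS(2)) and the error-change test
  % (OPTIONS(3)) must succeed for early termination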
  if (max(abs(w - wold)) < options(2) && abs(fnew - fold) < options(3))
    % Termination criteria are met
    options(8) = fnew;
    net = feval(unpakstr, net, w);
    return;
  end
end

if fcneval
  options(8) = fnew;
else
  % Return error on entire dataset
  options(8) = neterr(w, net, x, t);
  options(10) = options(10) + 1;
end
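% maxitmess is the standard Netlab helper returning the 'maximum number
% of iterations has been exceeded' warning string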
if (options(1) >= 0)
  disp(maxitmess);
end

net = feval(unpakstr, net, w);