toolboxes/FullBNT-1.0.7/netlab3.3/olgd.m @ 0:e9a9cd732c1e tip

first hg version after svn
author wolffd
date Tue, 10 Feb 2015 15:05:51 +0000
function [net, options, errlog, pointlog] = olgd(net, options, x, t)
%OLGD On-line gradient descent optimization.
%
% Description
% [NET, OPTIONS, ERRLOG, POINTLOG] = OLGD(NET, OPTIONS, X, T) uses on-
% line gradient descent to find a local minimum of the error function
% for the network NET computed on the input data X and target values T.
% A log of the error values after each cycle is (optionally) returned
% in ERRLOG, and a log of the points visited is (optionally) returned
% in POINTLOG. Because the gradient is computed on-line (i.e. after
% each pattern) this can be quite inefficient in Matlab.
%
% The error function value at the final weight vector is returned in
% OPTIONS(8).
%
% The optional parameters have the following interpretations.
%
% OPTIONS(1) is set to 1 to display error values; this also logs error
% values in the return argument ERRLOG, and the points visited in the
% return argument POINTLOG. If OPTIONS(1) is set to 0, then only
% warning messages are displayed. If OPTIONS(1) is -1, then nothing is
% displayed.
%
% OPTIONS(2) is the precision required of the weights at the solution.
% If the maximum absolute difference between the weights of two
% successive steps is less than OPTIONS(2), then this condition is
% satisfied.
%
% OPTIONS(3) is the precision required of the objective function at
% the solution. If the absolute difference between the error function
% values of two successive steps is less than OPTIONS(3), then this
% condition is satisfied. Both this and the previous condition must be
% satisfied for termination. Note that testing the function value at
% each iteration roughly halves the speed of the algorithm.
%
% OPTIONS(5) determines whether the patterns are sampled randomly with
% replacement. If it is 0 (the default), then patterns are sampled in
% order.
%
% OPTIONS(6) determines whether the learning rate decays. If it is 1,
% the learning rate decays at a rate of 1/T; if it is 0 (the default),
% the learning rate is constant.
%
% OPTIONS(9) should be set to 1 to check the user-defined gradient
% function.
%
% OPTIONS(10) returns the total number of function evaluations
% (including those in any line searches).
%
% OPTIONS(11) returns the total number of gradient evaluations.
%
% OPTIONS(14) is the maximum number of iterations (passes through the
% complete pattern set); default 100.
%
% OPTIONS(17) is the momentum; default 0.5.
%
% OPTIONS(18) is the learning rate; default 0.01.
%
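% Example
% A minimal usage sketch (illustrative, not from the original help):
% it assumes a network built with the Netlab MLP function and
% training data X, T of matching dimensions.
%
%   net = mlp(2, 3, 1, 'linear');  % 2 inputs, 3 hidden units, 1 output
%   options = zeros(1, 18);
%   options(1) = 1;       % display error after each pass
%   options(2) = 1e-6;    % weight precision (joint stopping test)
%   options(3) = 1e-6;    % error precision (joint stopping test)
%   options(14) = 50;     % 50 passes through the pattern set
%   options(17) = 0.9;    % momentum
%   options(18) = 0.05;   % learning rate
%   [net, options, errlog] = olgd(net, options, x, t);
%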
% See also
% GRADDESC
%

% Copyright (c) Ian T Nabney (1996-2001)

% Set up the options.
if length(options) < 18
  error('Options vector too short')
end

if (options(14))
  niters = options(14);
else
  niters = 100;
end

% Learning rate: must be positive
if (options(18) > 0)
  eta = options(18);
else
  eta = 0.01;
end
% Save initial learning rate for annealing
lr = eta;
% Momentum term: allow zero momentum
if (options(17) >= 0)
  mu = options(17);
else
  mu = 0.5;
end

pakstr = [net.type, 'pak'];
unpakstr = [net.type, 'unpak'];

% Extract initial weights from the network
w = feval(pakstr, net);

display = options(1);

% Work out if we need to compute f at each iteration.
% Needed if results are displayed, if the termination criterion
% requires it, or if an error log is returned (otherwise FNEW would
% be used before it is assigned).
fcneval = (display > 0 | options(3) | nargout >= 3);

% Check gradients
if (options(9))
  feval('gradchek', w, 'neterr', 'netgrad', net, x, t);
end

dwold = zeros(1, length(w));
fold = 0; % Must be initialised so that termination test can be performed
ndata = size(x, 1);

if fcneval
  fnew = neterr(w, net, x, t);
  options(10) = options(10) + 1;
  fold = fnew;
end

j = 1;
if nargout >= 3
  errlog(j, :) = fnew;
  if nargout == 4
    pointlog(j, :) = w;
  end
end

% Main optimization loop.
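% Each pass presents all NDATA patterns (in order, or as NDATA random
% draws with replacement if OPTIONS(5) is set) and updates the weights
% after every individual pattern.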
while j <= niters
  wold = w;
  if options(5)
    % Randomise order of pattern presentation: with replacement
    pnum = ceil(rand(ndata, 1).*ndata);
  else
    pnum = 1:ndata;
  end
  for k = 1:ndata
    grad = netgrad(w, net, x(pnum(k),:), t(pnum(k),:));
    if options(6)
      % Let learning rate decrease as 1/t
      lr = eta/((j-1)*ndata + k);
    end
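    % On-line update with momentum: the new step is MU times the
    % previous step minus the (possibly decayed) learning rate times
    % the gradient for this single pattern.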
    dw = mu*dwold - lr*grad;
    w = w + dw;
    dwold = dw;
  end
  options(11) = options(11) + 1; % Increment gradient evaluation count (once per pass)
  if fcneval
    fold = fnew;
    fnew = neterr(w, net, x, t);
    options(10) = options(10) + 1;
  end
  if display > 0
    fprintf(1, 'Iteration %5d Error %11.8f\n', j, fnew);
  end
  j = j + 1;
  if nargout >= 3
    errlog(j, :) = fnew;
    if nargout == 4
      pointlog(j, :) = w;
    end
  end
  if (max(abs(w - wold)) < options(2) && abs(fnew - fold) < options(3))
    % Termination criteria are met
    options(8) = fnew;
    net = feval(unpakstr, net, w);
    return;
  end
end

if fcneval
  options(8) = fnew;
else
  % Return error on entire dataset
  options(8) = neterr(w, net, x, t);
  options(10) = options(10) + 1;
end
if (options(1) >= 0)
  disp(maxitmess);
end

net = feval(unpakstr, net, w);