function net = somtrain(net, options, x)
%SOMTRAIN Kohonen training algorithm for SOM.
%
%	Description
%	NET = SOMTRAIN(NET, OPTIONS, X) uses Kohonen's algorithm to train a
%	SOM.  Both on-line and batch algorithms are implemented. The learning
%	rate (for on-line) and neighbourhood size decay linearly. There is no
%	error function minimised during training (so there is no termination
%	criterion other than the number of epochs), but the  sum-of-squares
%	is computed and returned in OPTIONS(8).
%
%	The optional parameters have the following interpretations.
%
%	OPTIONS(1) is set to 1 to display error values; also logs learning
%	rate ALPHA and neighbourhood size NSIZE. Otherwise nothing is
%	displayed.
%
%	OPTIONS(5) determines whether the patterns are sampled randomly with
%	replacement. If it is 0 (the default), then patterns are sampled in
%	order.  This is only relevant to the on-line algorithm.
%
%	OPTIONS(6) determines if the on-line or batch algorithm is used. If
%	it is 1 then the batch algorithm is used.  If it is 0 (the default)
%	then the on-line algorithm is used.
%
%	OPTIONS(14) is the maximum number of iterations (passes through the
%	complete pattern set); default 100.
%
%	OPTIONS(15) is the final neighbourhood size; default value is the
%	same as the initial neighbourhood size.
%
%	OPTIONS(16) is the final learning rate; default value is the same as
%	the initial learning rate.
%
%	OPTIONS(17) is the initial neighbourhood size; default 0.5*maximum
%	map size.
%
%	OPTIONS(18) is the initial learning rate; default 0.9.  This
%	parameter must be positive.
%
%	See also
%	KMEANS, SOM, SOMFWD
%

%	Copyright (c) Ian T Nabney (1996-2001)

% Check arguments for consistency
errstring = consist(net, 'som', x);
if ~isempty(errstring)
  error(errstring);
end

% Set number of iterations in convergence phase
if (~options(14))
  options(14) = 100;
end
niters = options(14);

% Learning rate must be positive; fall back to the documented default 0.9
if (options(18) > 0)
  alpha_first = options(18);
else
  alpha_first = 0.9;
end
% Final learning rate must be no greater than initial learning rate
% (and non-negative); otherwise keep the rate constant at alpha_first.
if (options(16) > alpha_first | options(16) < 0)
  alpha_last = alpha_first;
else
  alpha_last = options(16);
end

% Neighbourhood size
% NOTE(review): the documented default (half the maximum map dimension) is
% only used when OPTIONS(17) is negative; OPTIONS(17) = 0 is accepted as an
% initial neighbourhood size of zero.  Preserved as original behaviour.
if (options(17) >= 0)
  nsize_first = options(17);
else
  nsize_first = max(net.map_dim)/2;
end
% Final neighbourhood size must be no greater than initial size
% (and non-negative); otherwise keep the size constant at nsize_first.
if (options(15) > nsize_first | options(15) < 0)
  nsize_last = nsize_first;
else
  nsize_last = options(15);
end

ndata = size(x, 1);

if options(6)
  % Batch algorithm: H(k, m) marks whether node m lies in the
  % neighbourhood of the winning node for pattern k.
  H = zeros(ndata, net.num_nodes);
end
% Put weights into matrix form (one row per map node)
tempw = sompak(net);

% Then carry out training
j = 1;
while j <= niters
  if options(6)
    % Batch version of algorithm
    alpha = 0.0;
    frac_done = (niters - j)/niters;
    % Compute neighbourhood: decays linearly from nsize_first to nsize_last
    nsize = round((nsize_first - nsize_last)*frac_done + nsize_last);

    % Find winning node: put weights back into net so that we can
    % call somfwd
    net = somunpak(net, tempw);
    [temp, bnode] = somfwd(net, x);
    for k = 1:ndata
      H(k, :) = reshape(net.inode_dist(:, :, bnode(k))<=nsize, ...
        1, net.num_nodes);
    end
    % Each node moves to the mean of the patterns for which it lies in
    % the winner's neighbourhood; nodes with an empty neighbourhood
    % membership (s(k) == 0) are left unchanged.
    s = sum(H, 1);
    for k = 1:net.num_nodes
      if s(k) > 0
        tempw(k, :) = sum((H(:, k)*ones(1, net.nin)).*x, 1)/ ...
          s(k);
      end
    end
  else
    % On-line version of algorithm
    if options(5)
      % Randomise order of pattern presentation: with replacement
      pnum = ceil(rand(ndata, 1).*ndata);
    else
      pnum = 1:ndata;
    end
    % Cycle through dataset
    for k = 1:ndata
      % Fraction done
      frac_done = (((niters+1)*ndata)-(j*ndata + k))/((niters+1)*ndata);
      % Compute learning rate: decays linearly to alpha_last
      alpha = (alpha_first - alpha_last)*frac_done + alpha_last;
      % Compute neighbourhood: decays linearly to nsize_last
      nsize = round((nsize_first - nsize_last)*frac_done + nsize_last);
      % Find best node (minimum L1 distance to the pattern)
      pat_diff = ones(net.num_nodes, 1)*x(pnum(k), :) - tempw;
      [temp, bnode] = min(sum(abs(pat_diff), 2));

      % Now update neighbourhood: move every node within nsize of the
      % winner a fraction alpha towards the pattern
      neighbourhood = (net.inode_dist(:, :, bnode) <= nsize);
      tempw = tempw + ...
        ((alpha*(neighbourhood(:)))*ones(1, net.nin)).*pat_diff;
    end
  end
  if options(1)
    % Print iteration information
    fprintf(1, 'Iteration %d; alpha = %f, nsize = %f. ', j, alpha, ...
      nsize);
    % Print sum squared error to nearest node
    d2 = dist2(tempw, x);
    fprintf(1, 'Error = %f\n', sum(min(d2)));
  end
  j = j + 1;
end

% Write the trained weights back into the network structure and report
% the final sum-of-squares error (distance of each pattern to its
% nearest node) in OPTIONS(8).
net = somunpak(net, tempw);
options(8) = sum(min(dist2(tempw, x)));