comparison: toolboxes/FullBNT-1.0.7/netlab3.3/somtrain.m @ 0:e9a9cd732c1e (tip)
first hg version after svn

author:   wolffd
date:     Tue, 10 Feb 2015 15:05:51 +0000
parents:  (none)
children: (none)
comparing: -1:000000000000 -> 0:e9a9cd732c1e
function [net, options] = somtrain(net, options, x)
%SOMTRAIN Kohonen training algorithm for SOM.
%
% Description
% NET = SOMTRAIN(NET, OPTIONS, X) uses Kohonen's algorithm to train a
% SOM. Both on-line and batch algorithms are implemented. The learning
% rate (for on-line) and neighbourhood size decay linearly. There is no
% error function minimised during training (so there is no termination
% criterion other than the number of epochs), but the sum-of-squares
% is computed and returned in OPTIONS(8).
%
% The optional parameters have the following interpretations.
%
% OPTIONS(1) is set to 1 to display error values; also logs learning
% rate ALPHA and neighbourhood size NSIZE. Otherwise nothing is
% displayed.
%
% OPTIONS(5) determines whether the patterns are sampled randomly with
% replacement. If it is 0 (the default), then patterns are sampled in
% order. This is only relevant to the on-line algorithm.
%
% OPTIONS(6) determines if the on-line or batch algorithm is used. If
% it is 1 then the batch algorithm is used. If it is 0 (the default)
% then the on-line algorithm is used.
%
% OPTIONS(14) is the maximum number of iterations (passes through the
% complete pattern set); default 100.
%
% OPTIONS(15) is the final neighbourhood size; default value is the
% same as the initial neighbourhood size.
%
% OPTIONS(16) is the final learning rate; default value is the same as
% the initial learning rate.
%
% OPTIONS(17) is the initial neighbourhood size; default 0.5*maximum
% map size.
%
% OPTIONS(18) is the initial learning rate; default 0.9. This
% parameter must be positive.
%
% See also
% KMEANS, SOM, SOMFWD
%
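% Example
% A minimal usage sketch; the data, map size and option values below
% are illustrative choices, not defaults prescribed by this function:
%
%   x = rand(100, 2);        % toy dataset: 100 two-dimensional points
%   net = som(2, [8 8]);     % 8x8 map over two-dimensional inputs
%   options = zeros(1, 18);  % Netlab-style options vector
%   options(1) = 1;          % log alpha, nsize and error each epoch
%   options(14) = 50;        % 50 passes through the data
%   options(17) = 4;         % initial neighbourhood size
%   net = somtrain(net, options, x);
%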

% Copyright (c) Ian T Nabney (1996-2001)

% Check arguments for consistency
errstring = consist(net, 'som', x);
if ~isempty(errstring)
  error(errstring);
end

% Set number of iterations in convergence phase
if (~options(14))
  options(14) = 100;
end
niters = options(14);

% Learning rate must be positive
if (options(18) > 0)
  alpha_first = options(18);
else
  alpha_first = 0.9;
end
% Final learning rate must be no greater than initial learning rate
if (options(16) > alpha_first || options(16) < 0)
  alpha_last = alpha_first;
else
  alpha_last = options(16);
end

% Neighbourhood size
if (options(17) >= 0)
  nsize_first = options(17);
else
  nsize_first = max(net.map_dim)/2;
end
% Final neighbourhood size must be no greater than initial size
if (options(15) > nsize_first || options(15) < 0)
  nsize_last = nsize_first;
else
  nsize_last = options(15);
end

ndata = size(x, 1);

if options(6)
  % Batch algorithm
  H = zeros(ndata, net.num_nodes);
end
% Put weights into matrix form
tempw = sompak(net);

% Then carry out training
j = 1;
while j <= niters
  if options(6)
    % Batch version of algorithm
    alpha = 0.0;
    frac_done = (niters - j)/niters;
    % Compute neighbourhood
    nsize = round((nsize_first - nsize_last)*frac_done + nsize_last);

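    % Batch update: H marks, for each pattern, the nodes within nsize
    % of its winning node; each weight vector then becomes the mean of
    % the patterns falling in its neighbourhood,
    %   w_k = sum_n H(n,k)*x(n,:) / s(k)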
    % Find winning node: put weights back into net so that we can
    % call somunpak
    net = somunpak(net, tempw);
    [temp, bnode] = somfwd(net, x);
    for k = 1:ndata
      H(k, :) = reshape(net.inode_dist(:, :, bnode(k))<=nsize, ...
        1, net.num_nodes);
    end
    s = sum(H, 1);
    for k = 1:net.num_nodes
      if s(k) > 0
        tempw(k, :) = sum((H(:, k)*ones(1, net.nin)).*x, 1)/ ...
          s(k);
      end
    end
  else
    % On-line version of algorithm
    if options(5)
      % Randomise order of pattern presentation: with replacement
      pnum = ceil(rand(ndata, 1).*ndata);
    else
      pnum = 1:ndata;
    end
    % Cycle through dataset
    for k = 1:ndata
      % Fraction done
      frac_done = (((niters+1)*ndata)-(j*ndata + k))/((niters+1)*ndata);
      % Compute learning rate
      alpha = (alpha_first - alpha_last)*frac_done + alpha_last;
      % Compute neighbourhood
      nsize = round((nsize_first - nsize_last)*frac_done + nsize_last);
      % Find best node
      pat_diff = ones(net.num_nodes, 1)*x(pnum(k), :) - tempw;
      [temp, bnode] = min(sum(abs(pat_diff), 2));

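      % Kohonen rule: every node within nsize of the winner moves a
      % fraction alpha towards the current pattern,
      %   w_j <- w_j + alpha*(x - w_j)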
      % Now update neighbourhood
      neighbourhood = (net.inode_dist(:, :, bnode) <= nsize);
      tempw = tempw + ...
        ((alpha*(neighbourhood(:)))*ones(1, net.nin)).*pat_diff;
    end
  end
  if options(1)
    % Print iteration information
    fprintf(1, 'Iteration %d; alpha = %f, nsize = %f. ', j, alpha, ...
      nsize);
    % Print sum squared error to nearest node
    d2 = dist2(tempw, x);
    fprintf(1, 'Error = %f\n', sum(min(d2)));
  end
  j = j + 1;
end

net = somunpak(net, tempw);
options(8) = sum(min(dist2(tempw, x)));
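
After training, SOMFWD (named under "See also" in the help text) maps patterns onto the grid. A minimal follow-up sketch, assuming the net, options and x from the example in the help comments; the two-output call matches the signature above, so the sum-of-squares error is available in options(8):

    [net, options] = somtrain(net, options, x);
    [d2, winners] = somfwd(net, x);   % squared distances and winning node per pattern
    fprintf('sum-of-squares error to nearest node: %f\n', options(8));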