toolboxes/MIRtoolbox1.3.2/somtoolbox/som_seqtrain.m @ 0:e9a9cd732c1e (tip)
commit:  first hg version after svn
author:  wolffd
date:    Tue, 10 Feb 2015 15:05:51 +0000
function [sMap, sTrain] = som_seqtrain(sMap, D, varargin)

%SOM_SEQTRAIN Use sequential algorithm to train the Self-Organizing Map.
%
% [sM,sT] = som_seqtrain(sM, D, [[argID,] value, ...])
%
%  sM = som_seqtrain(sM,D);
%  sM = som_seqtrain(sM,sD,'alpha_type','power','tracking',3);
%  [M,sT] = som_seqtrain(M,D,'ep','trainlen',10,'inv','hexa');
%
%  Input and output arguments ([]'s are optional):
%   sM      (struct) map struct, the trained and updated map is returned
%           (matrix) codebook matrix of a self-organizing map
%                    size munits x dim or msize(1) x ... x msize(k) x dim
%                    The trained map codebook is returned.
%   D       (struct) training data; data struct
%           (matrix) training data, size dlen x dim
%   [argID, (string) See below. The values which are unambiguous can
%    value] (varies) be given without the preceding argID.
%
%   sT      (struct) learning parameters used during the training
%
% Here are the valid argument IDs and corresponding values. The values which
% are unambiguous (marked with '*') can be given without the preceding argID.
%   'mask'          (vector) BMU search mask, size dim x 1
%   'msize'         (vector) map size
%   'radius'        (vector) neighborhood radiuses, length 1, 2 or trainlen
%   'radius_ini'    (scalar) initial training radius
%   'radius_fin'    (scalar) final training radius
%   'alpha'         (vector) learning rates, length trainlen
%   'alpha_ini'     (scalar) initial learning rate
%   'tracking'      (scalar) tracking level, 0-3
%   'trainlen'      (scalar) training length
%   'trainlen_type' *(string) is the given trainlen 'samples' or 'epochs'
%   'train'         *(struct) train struct, parameters for training
%                             'sTrain','som_train' = 'train'
%   'alpha_type'    *(string) learning rate function, 'inv', 'linear' or 'power'
%   'sample_order'  *(string) order of samples: 'random' or 'ordered'
%   'neigh'         *(string) neighborhood function, 'gaussian', 'cutgauss',
%                             'ep' or 'bubble'
%   'topol'         *(struct) topology struct
%                             'som_topol','sTopol' = 'topol'
%   'lattice'       *(string) map lattice, 'hexa' or 'rect'
%   'shape'         *(string) map shape, 'sheet', 'cyl' or 'toroid'
%
% For more help, try 'type som_seqtrain' or check out online documentation.
% See also SOM_MAKE, SOM_BATCHTRAIN, SOM_TRAIN_STRUCT.

%%%%%%%%%%%%% DETAILED DESCRIPTION %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% som_seqtrain
%
% PURPOSE
%
% Trains a Self-Organizing Map using the sequential algorithm.
%
% SYNTAX
%
%  sM = som_seqtrain(sM,D);
%  sM = som_seqtrain(sM,sD);
%  sM = som_seqtrain(...,'argID',value,...);
%  sM = som_seqtrain(...,value,...);
%  [sM,sT] = som_seqtrain(M,D,...);
%
% DESCRIPTION
%
% Trains the given SOM (sM or M above) with the given training data
% (sD or D) using the sequential SOM training algorithm. If no optional
% arguments (argID, value) are given, a default training is done and the
% training parameters are obtained from the SOM_TRAIN_STRUCT function.
% Using the optional arguments, the training parameters can be specified.
% Returns the trained and updated SOM and a train struct which contains
% information on the training.
%
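% In outline, each training step t picks one data sample x(t), finds its
% best-matching unit (BMU) c, and moves every map unit m_i towards the
% sample according to Kohonen's sequential update rule:
%
%    m_i(t+1) = m_i(t) + alpha(t) * h_ci(t) * (x(t) - m_i(t))
%
% where alpha(t) is the learning rate and h_ci(t) is the value of the
% neighborhood function between units c and i at step t.
%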
% REFERENCES
%
% Kohonen, T., "Self-Organizing Maps", 2nd ed., Springer-Verlag,
%    Berlin, 1995, pp. 78-82.
% Kohonen, T., "Clustering, Taxonomy, and Topological Maps of
%    Patterns", International Conference on Pattern Recognition
%    (ICPR), 1982, pp. 114-128.
% Kohonen, T., "Self-Organized formation of topologically correct
%    feature maps", Biological Cybernetics 43, 1982, pp. 59-69.
%
% REQUIRED INPUT ARGUMENTS
%
%  sM          The map to be trained.
%     (struct) map struct
%     (matrix) codebook matrix (field .codebook of map struct)
%              Size is either [munits dim], in which case the map grid
%              dimensions (msize) should be specified with optional arguments,
%              or [msize(1) ... msize(k) dim] in which case the map
%              grid dimensions are taken from the size of the matrix.
%              Lattice, by default, is 'rect' and shape 'sheet'.
%  D           Training data.
%     (struct) data struct
%     (matrix) data matrix, size [dlen dim]
%
% OPTIONAL INPUT ARGUMENTS
%
%  argID (string) Argument identifier string (see below).
%  value (varies) Value for the argument (see below).
%
%  The optional arguments can be given as 'argID',value -pairs. If an
%  argument is given a value multiple times, the last one is used.
%  The valid IDs and corresponding values are listed below. The values
%  which are unambiguous (marked with '*') can be given without the
%  preceding argID.
%
%  'mask'         (vector) BMU search mask, size dim x 1. Default is
%                          the one in sM (field '.mask') or a vector of
%                          ones if only a codebook matrix was given.
%  'msize'        (vector) map grid dimensions. Default is the one
%                          in sM (field sM.topol.msize) or
%                          'si = size(sM); msize = si(1:end-1);'
%                          if only a codebook matrix was given.
%  'radius'       (vector) neighborhood radius
%                          length = 1: radius_ini = radius
%                          length = 2: [radius_ini radius_fin] = radius
%                          length > 2: the vector gives neighborhood
%                                      radius for each step separately
%                                      trainlen = length(radius)
%  'radius_ini'   (scalar) initial training radius
%  'radius_fin'   (scalar) final training radius
%  'alpha'        (vector) learning rate
%                          length = 1: alpha_ini = alpha
%                          length > 1: the vector gives learning rate
%                                      for each step separately
%                                      trainlen is set to length(alpha)
%                                      alpha_type is set to 'user defined'
%  'alpha_ini'    (scalar) initial learning rate
%  'tracking'     (scalar) tracking level: 0, 1 (default), 2 or 3
%                          0 - estimate time
%                          1 - track time and quantization error
%                          2 - plot quantization error
%                          3 - plot quantization error and two first
%                              components
%  'trainlen'     (scalar) training length (see also 'trainlen_type')
%  'trainlen_type' *(string) is the trainlen argument given in 'epochs'
%                          or in 'samples'. Default is 'epochs'.
%  'sample_order' *(string) is the sample order 'random' (which is the
%                          default) or 'ordered' in which case
%                          samples are taken in the order in which they
%                          appear in the data set
%  'train'        *(struct) train struct, parameters for training.
%                          Default parameters, unless specified,
%                          are acquired using SOM_TRAIN_STRUCT (this
%                          also applies for 'trainlen', 'alpha_type',
%                          'alpha_ini', 'radius_ini' and 'radius_fin').
%                          'sTrain', 'som_train' (struct) = 'train'
%  'neigh'        *(string) The used neighborhood function. Default is
%                          the one in sM (field '.neigh') or 'gaussian'
%                          if only a codebook matrix was given. Other
%                          possible values are 'cutgauss', 'ep' and 'bubble'.
%  'topol'        *(struct) topology of the map. Default is the one
%                          in sM (field '.topol').
%                          'sTopol', 'som_topol' (struct) = 'topol'
%  'alpha_type'   *(string) learning rate function, 'inv', 'linear' or 'power'
%  'lattice'      *(string) map lattice. Default is the one in sM
%                          (field sM.topol.lattice) or 'rect'
%                          if only a codebook matrix was given.
%  'shape'        *(string) map shape. Default is the one in sM
%                          (field sM.topol.shape) or 'sheet'
%                          if only a codebook matrix was given.
%
% OUTPUT ARGUMENTS
%
%  sM          the trained map
%     (struct) if a map struct was given as input argument, a
%              map struct is also returned. The current training
%              is added to the training history (sM.trainhist).
%              The 'neigh' and 'mask' fields of the map struct
%              are updated to match those of the training.
%     (matrix) if a matrix was given as input argument, a matrix
%              is also returned with the same size as the input
%              argument.
%  sT (struct) train struct; information of the accomplished training
%
% EXAMPLES
%
% Simplest case:
%  sM = som_seqtrain(sM,D);
%  sM = som_seqtrain(sM,sD);
%
% To change the tracking level, 'tracking' argument is specified:
%  sM = som_seqtrain(sM,D,'tracking',3);
%
% To change the training parameters, the optional arguments 'train',
% 'neigh','mask','trainlen','radius','radius_ini', 'radius_fin',
% 'alpha', 'alpha_type' and 'alpha_ini' are used.
%  sM = som_seqtrain(sM,D,'neigh','cutgauss','trainlen',10,'radius_fin',0);
%
% Another way to specify training parameters is to create a train struct:
%  sTrain = som_train_struct(sM,'dlen',size(D,1),'algorithm','seq');
%  sTrain = som_set(sTrain,'neigh','cutgauss');
%  sM = som_seqtrain(sM,D,sTrain);
%
% By default the neighborhood radius goes linearly from radius_ini to
% radius_fin. If you want to change this, you can use the 'radius' argument
% to specify the neighborhood radius for each step separately:
%  sM = som_seqtrain(sM,D,'radius',[5 3 1 1 1 1 0.5 0.5 0.5]);
%
% By default the learning rate (alpha) goes from the alpha_ini to 0
% along the function defined by alpha_type. If you want to change this,
% you can use the 'alpha' argument to specify the learning rate
% for each step separately:
%  alpha = 0.2*(1 - log([1:100]));
%  sM = som_seqtrain(sM,D,'alpha',alpha);
%
% You don't necessarily have to use the map struct, but you can operate
% directly with codebook matrices. However, in this case you have to
% specify the topology of the map in the optional arguments. The
% following commands are identical (M is originally a 200 x dim sized matrix):
%  M = som_seqtrain(M,D,'msize',[20 10],'lattice','hexa','shape','cyl');
%
%  M = som_seqtrain(M,D,'msize',[20 10],'hexa','cyl');
%
%  sT = som_set('som_topol','msize',[20 10],'lattice','hexa','shape','cyl');
%  M = som_seqtrain(M,D,sT);
%
%  M = reshape(M,[20 10 dim]);
%  M = som_seqtrain(M,D,'hexa','cyl');
%
% The som_seqtrain also returns a train struct with information on the
% accomplished training. This is the same one as is added to the end of the
% trainhist field of map struct, in case a map struct is given.
%  [M,sTrain] = som_seqtrain(M,D,'msize',[20 10]);
%
%  [sM,sTrain] = som_seqtrain(sM,D); % sM.trainhist(end)==sTrain
%
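% If needed, a rough ordering phase and a fine-tuning phase can be run
% back to back by calling som_seqtrain twice with different radius and
% training length settings. A sketch using only the arguments documented
% above (the parameter values are illustrative, not defaults):
%  sM = som_seqtrain(sM,D,'radius_ini',5,'radius_fin',1,'trainlen',4);
%  sM = som_seqtrain(sM,D,'radius_ini',1,'radius_fin',0.5,'trainlen',16);
%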
% SEE ALSO
%
%  som_make         Initialize and train a SOM using default parameters.
%  som_batchtrain   Train SOM with batch algorithm.
%  som_train_struct Determine default training parameters.

% Copyright (c) 1997-2000 by the SOM toolbox programming team.
% http://www.cis.hut.fi/projects/somtoolbox/

% Version 1.0beta juuso 220997
% Version 2.0beta juuso 101199

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% Check arguments

error(nargchk(2, Inf, nargin));  % check the number of input arguments

% map
struct_mode = isstruct(sMap);
if struct_mode,
  sTopol = sMap.topol;
else
  orig_size = size(sMap);
  if ndims(sMap) > 2,
    si = size(sMap); dim = si(end); msize = si(1:end-1);
    M = reshape(sMap,[prod(msize) dim]);
  else
    msize = [orig_size(1) 1];
    dim = orig_size(2);
  end
  sMap = som_map_struct(dim,'msize',msize);
  sTopol = sMap.topol;
end
[munits dim] = size(sMap.codebook);

% data
if isstruct(D),
  data_name = D.name;
  D = D.data;
else
  data_name = inputname(2);
end
D = D(find(sum(isnan(D),2) < dim),:); % remove empty vectors from the data
[dlen ddim] = size(D);                % check input dimension
if dim ~= ddim, error('Map and data input space dimensions disagree.'); end

% varargin
sTrain = som_set('som_train','algorithm','seq','neigh', ...
                 sMap.neigh,'mask',sMap.mask,'data_name',data_name);
radius = [];
alpha = [];
tracking = 1;
sample_order_type = 'random';
tlen_type = 'epochs';

i=1;
while i<=length(varargin),
  argok = 1;
  if ischar(varargin{i}),
    switch varargin{i},
      % argument IDs
      case 'msize',         i=i+1; sTopol.msize = varargin{i};
      case 'lattice',       i=i+1; sTopol.lattice = varargin{i};
      case 'shape',         i=i+1; sTopol.shape = varargin{i};
      case 'mask',          i=i+1; sTrain.mask = varargin{i};
      case 'neigh',         i=i+1; sTrain.neigh = varargin{i};
      case 'trainlen',      i=i+1; sTrain.trainlen = varargin{i};
      case 'trainlen_type', i=i+1; tlen_type = varargin{i};
      case 'tracking',      i=i+1; tracking = varargin{i};
      case 'sample_order',  i=i+1; sample_order_type = varargin{i};
      case 'radius_ini',    i=i+1; sTrain.radius_ini = varargin{i};
      case 'radius_fin',    i=i+1; sTrain.radius_fin = varargin{i};
      case 'radius',
        i=i+1;
        l = length(varargin{i});
        if l==1,
          sTrain.radius_ini = varargin{i};
        else
          sTrain.radius_ini = varargin{i}(1);
          sTrain.radius_fin = varargin{i}(end);
          if l>2, radius = varargin{i}; tlen_type = 'samples'; end
        end
      case 'alpha_type',    i=i+1; sTrain.alpha_type = varargin{i};
      case 'alpha_ini',     i=i+1; sTrain.alpha_ini = varargin{i};
      case 'alpha',
        i=i+1;
        sTrain.alpha_ini = varargin{i}(1);
        if length(varargin{i})>1,
          alpha = varargin{i}; tlen_type = 'samples';
          sTrain.alpha_type = 'user defined';
        end
      case {'sTrain','train','som_train'}, i=i+1; sTrain = varargin{i};
      case {'topol','sTopol','som_topol'},
        i=i+1;
        sTopol = varargin{i};
        if prod(sTopol.msize) ~= munits,
          error('Given map grid size does not match the codebook size.');
        end
      % unambiguous values
      case {'inv','linear','power'},              sTrain.alpha_type = varargin{i};
      case {'hexa','rect'},                       sTopol.lattice = varargin{i};
      case {'sheet','cyl','toroid'},              sTopol.shape = varargin{i};
      case {'gaussian','cutgauss','ep','bubble'}, sTrain.neigh = varargin{i};
      case {'epochs','samples'},                  tlen_type = varargin{i};
      case {'random', 'ordered'},                 sample_order_type = varargin{i};
      otherwise, argok=0;
    end
  elseif isstruct(varargin{i}) & isfield(varargin{i},'type'),
    switch varargin{i}(1).type,
      case 'som_topol',
        sTopol = varargin{i};
        if prod(sTopol.msize) ~= munits,
          error('Given map grid size does not match the codebook size.');
        end
      case 'som_train', sTrain = varargin{i};
      otherwise, argok=0;
    end
  else
    argok = 0;
  end
  if ~argok,
    disp(['(som_seqtrain) Ignoring invalid argument #' num2str(i+2)]);
  end
  i = i+1;
end

% training length
if ~isempty(radius) | ~isempty(alpha),
  lr = length(radius);
  la = length(alpha);
  if lr>2 | la>1,
    tlen_type = 'samples';
    if lr>2 & la<=1,      sTrain.trainlen = lr;
    elseif lr<=2 & la>1,  sTrain.trainlen = la;
    elseif lr==la,        sTrain.trainlen = la;
    else
      error('Mismatch between radius and learning rate vector lengths.')
    end
  end
end
if strcmp(tlen_type,'samples'), sTrain.trainlen = sTrain.trainlen/dlen; end
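% (note: from here on sTrain.trainlen is kept in epochs; it is converted
%  back to the number of sample presentations in the initialization
%  section below, where trainlen = sTrain.trainlen*dlen)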

% check topology
if struct_mode,
  if ~strcmp(sTopol.lattice,sMap.topol.lattice) | ...
      ~strcmp(sTopol.shape,sMap.topol.shape) | ...
      any(sTopol.msize ~= sMap.topol.msize),
    warning('Changing the original map topology.');
  end
end
sMap.topol = sTopol;

% complement the training struct
sTrain = som_train_struct(sTrain,sMap,'dlen',dlen);
if isempty(sTrain.mask), sTrain.mask = ones(dim,1); end

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% initialize

M = sMap.codebook;
mask = sTrain.mask;
trainlen = sTrain.trainlen*dlen;

% neighborhood radius
if length(radius)>2,
  radius_type = 'user defined';
else
  radius = [sTrain.radius_ini sTrain.radius_fin];
  rini = radius(1);
  rstep = (radius(end)-radius(1))/(trainlen-1);
  radius_type = 'linear';
end

% learning rate
if length(alpha)>1,
  sTrain.alpha_type = 'user defined';
  if length(alpha) ~= trainlen,
    error('Trainlen and length of learning rate vector do not match.')
  end
  if any(isnan(alpha)),
    error('NaN is an illegal learning rate.')
  end
else
  if isempty(alpha), alpha = sTrain.alpha_ini; end
  if strcmp(sTrain.alpha_type,'inv'),
    % alpha(t) = a / (t+b), where a and b are chosen suitably
    % below, they are chosen so that alpha_fin = alpha_ini/100
    b = (trainlen - 1) / (100 - 1);
    a = b * alpha;
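    % as a quick check of this choice: alpha(1) = a/b = alpha_ini, and
    % alpha(trainlen) = a/(b+trainlen-1) = a/(100*b) = alpha_ini/100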
  end
end

% initialize random number generator
rand('state',sum(100*clock));

% distance between map units in the output space
%  Since in the case of gaussian and ep neighborhood functions, the
%  equations utilize squares of the unit distances and in bubble case
%  it doesn't matter which is used, the unit distances and neighborhood
%  radiuses are squared.
Ud = som_unit_dists(sTopol).^2;

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% Action

update_step = 100;
mu_x_1 = ones(munits,1);
samples = ones(update_step,1);
r = samples;
alfa = samples;

qe = 0;
start = clock;
if tracking > 0, % initialize tracking
  track_table = zeros(update_step,1);
  qe = zeros(floor(trainlen/update_step),1);
end

for t = 1:trainlen,

  % Every update_step, new values for sample indices, neighborhood
  % radius and learning rate are calculated. This could be done
  % every step, but this way it is more efficient. Or this could
  % be done all at once outside the loop, but it would require much
  % more memory.
  ind = rem(t,update_step); if ind==0, ind = update_step; end
  if ind==1,
    steps = [t:min(trainlen,t+update_step-1)];
    % sample order
    switch sample_order_type,
      case 'ordered', samples = rem(steps,dlen)+1;
      case 'random',  samples = ceil(dlen*rand(update_step,1)+eps);
    end

    % neighborhood radius
    switch radius_type,
      case 'linear',       r = rini+(steps-1)*rstep;
      case 'user defined', r = radius(steps);
    end
    r = r.^2;       % squared radius (see notes about Ud above)
    r(r==0) = eps;  % zero radius might cause div-by-zero error

    % learning rate
    switch sTrain.alpha_type,
      case 'linear',       alfa = (1-steps/trainlen)*alpha;
      case 'inv',          alfa = a ./ (b + steps-1);
      case 'power',        alfa = alpha * (0.005/alpha).^((steps-1)/trainlen);
      case 'user defined', alfa = alpha(steps);
    end
  end

  % find BMU
  x = D(samples(ind),:);                 % pick one sample vector
  known = ~isnan(x);                     % its known components
  Dx = M(:,known) - x(mu_x_1,known);     % each map unit minus the vector
  [qerr bmu] = min((Dx.^2)*mask(known)); % minimum distance(^2) and the BMU
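  % i.e. the BMU minimizes the masked squared Euclidean distance
  %    sum_k mask(k)*(M(i,k) - x(k))^2
  % taken over the known (non-NaN) components of the sample only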

  % tracking
  if tracking>0,
    track_table(ind) = sqrt(qerr);
    if ind==update_step,
      n = ceil(t/update_step);
      qe(n) = mean(track_table);
      trackplot(M,D,tracking,start,n,qe);
    end
  end

  % neighborhood & learning rate
  % notice that the elements Ud and radius have been squared!
  % (see notes about Ud above)
  switch sTrain.neigh,
    case 'bubble',   h = (Ud(:,bmu)<=r(ind));
    case 'gaussian', h = exp(-Ud(:,bmu)/(2*r(ind)));
    case 'cutgauss', h = exp(-Ud(:,bmu)/(2*r(ind))) .* (Ud(:,bmu)<=r(ind));
    case 'ep',       h = (1-Ud(:,bmu)/r(ind)) .* (Ud(:,bmu)<=r(ind));
  end
  h = h*alfa(ind);

  % update M
  M(:,known) = M(:,known) - h(:,ones(sum(known),1)).*Dx;
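  % since Dx = M - x, this is the sequential update rule
  %    m_i <- m_i + alfa*h_ci*(x - m_i)
  % applied to the known components of the sample only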

end; % for t = 1:trainlen

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% Build / clean up the return arguments

if tracking, fprintf(1,'\n'); end

% update structures
sTrain = som_set(sTrain,'time',datestr(now,0));
if struct_mode,
  sMap = som_set(sMap,'codebook',M,'mask',sTrain.mask,'neigh',sTrain.neigh);
  tl = length(sMap.trainhist);
  sMap.trainhist(tl+1) = sTrain;
else
  sMap = reshape(M,orig_size);
end

return;



%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% subfunctions

%%%%%%%%
function [] = trackplot(M,D,tracking,start,n,qe)

l = length(qe);
elap_t = etime(clock,start);
tot_t = elap_t*l/n;
fprintf(1,'\rTraining: %3.0f/ %3.0f s',elap_t,tot_t)
switch tracking
  case 1,
  case 2,
    plot(1:n,qe(1:n),(n+1):l,qe((n+1):l))
    title('Quantization errors for latest samples')
    drawnow
  otherwise,
    subplot(2,1,1), plot(1:n,qe(1:n),(n+1):l,qe((n+1):l))
    title('Quantization error for latest samples');
    subplot(2,1,2), plot(M(:,1),M(:,2),'ro',D(:,1),D(:,2),'b.');
    title('First two components of map units (o) and data vectors (.)');
    drawnow
end
% end of trackplot