comparison toolboxes/MIRtoolbox1.3.2/somtoolbox/som_batchtrain.m @ 0:e9a9cd732c1e tip
first hg version after svn
author | wolffd |
---|---|
date | Tue, 10 Feb 2015 15:05:51 +0000 |
parents | |
children | |
1 function [sMap,sTrain] = som_batchtrain(sMap, D, varargin) | |
2 | |
3 %SOM_BATCHTRAIN Use batch algorithm to train the Self-Organizing Map. | |
4 % | |
5 % [sM,sT] = som_batchtrain(sM, D, [argID, value, ...]) | |
6 % | |
7 % sM = som_batchtrain(sM,D); | |
8 % sM = som_batchtrain(sM,sD,'radius',[10 3 2 1 0.1],'tracking',3); | |
9 % [M,sT] = som_batchtrain(M,D,'ep','msize',[10 3],'hexa'); | |
10 % | |
11 % Input and output arguments ([]'s are optional): | |
12 % sM (struct) map struct, the trained and updated map is returned | |
13 % (matrix) codebook matrix of a self-organizing map | |
14 % size munits x dim or msize(1) x ... x msize(k) x dim | |
15 % The trained map codebook is returned. | |
16 % D (struct) training data; data struct | |
17 % (matrix) training data, size dlen x dim | |
18 % [argID, (string) See below. The values which are unambiguous can | |
19 % value] (varies) be given without the preceding argID. | |
20 % | |
21 % sT (struct) learning parameters used during the training | |
22 % | |
23 % Here are the valid argument IDs and corresponding values. The values which | |
24 % are unambiguous (marked with '*') can be given without the preceding argID. | |
25 % 'mask' (vector) BMU search mask, size dim x 1 | |
26 % 'msize' (vector) map size | |
27 % 'radius' (vector) neighborhood radii, length 1, 2 or trainlen | |
28 % 'radius_ini' (scalar) initial training radius | |
29 % 'radius_fin' (scalar) final training radius | |
30 % 'tracking' (scalar) tracking level, 0-3 | |
31 % 'trainlen' (scalar) training length in epochs | |
32 % 'train' *(struct) train struct, parameters for training | |
33 % 'sTrain','som_train' = 'train' | |
34 % 'neigh' *(string) neighborhood function, 'gaussian', 'cutgauss', | |
35 % 'ep' or 'bubble' | |
36 % 'topol' *(struct) topology struct | |
37 % 'som_topol','sTopol' = 'topol' | |
38 % 'lattice' *(string) map lattice, 'hexa' or 'rect' | |
39 % 'shape' *(string) map shape, 'sheet', 'cyl' or 'toroid' | |
40 % 'weights' (vector) sample weights: each sample is weighted | |
41 % | |
42 % For more help, try 'type som_batchtrain' or check out online documentation. | |
43 % See also SOM_MAKE, SOM_SEQTRAIN, SOM_TRAIN_STRUCT. | |
44 | |
45 %%%%%%%%%%%%% DETAILED DESCRIPTION %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% | |
46 % | |
47 % som_batchtrain | |
48 % | |
49 % PURPOSE | |
50 % | |
51 % Trains a Self-Organizing Map using the batch algorithm. | |
52 % | |
53 % SYNTAX | |
54 % | |
55 % sM = som_batchtrain(sM,D); | |
56 % sM = som_batchtrain(sM,sD); | |
57 % sM = som_batchtrain(...,'argID',value,...); | |
58 % sM = som_batchtrain(...,value,...); | |
59 % [sM,sT] = som_batchtrain(M,D,...); | |
60 % | |
61 % DESCRIPTION | |
62 % | |
63 % Trains the given SOM (sM or M above) with the given training data | |
64 % (sD or D) using batch training algorithm. If no optional arguments | |
65 % (argID, value) are given, a default training is done. Using optional | |
66 % arguments the training parameters can be specified. Returns the | |
67 % trained and updated SOM and a train struct which contains | |
68 % information on the training. | |
69 % | |
70 % REFERENCES | |
71 % | |
72 % Kohonen, T., "Self-Organizing Map", 2nd ed., Springer-Verlag, | |
73 % Berlin, 1995, pp. 127-128. | |
74 % Kohonen, T., "Things you haven't heard about the Self-Organizing | |
75 % Map", In proceedings of International Conference | |
76 % on Neural Networks (ICNN), San Francisco, 1993, pp. 1147-1156. | |
77 % | |
78 % KNOWN BUGS | |
79 % | |
80 % Batchtrain does not work correctly for a map with a single unit. | |
81 % This is because of the way the 'min' function works. | |
82 % | |
83 % REQUIRED INPUT ARGUMENTS | |
84 % | |
85 % sM The map to be trained. | |
86 % (struct) map struct | |
87 % (matrix) codebook matrix (field .data of map struct) | |
88 % Size is either [munits dim], in which case the map grid | |
89 % dimensions (msize) should be specified with optional arguments, | |
90 % or [msize(1) ... msize(k) dim] in which case the map | |
91 % grid dimensions are taken from the size of the matrix. | |
92 % Lattice, by default, is 'rect' and shape 'sheet'. | |
93 % D Training data. | |
94 % (struct) data struct | |
95 % (matrix) data matrix, size [dlen dim] | |
96 % | |
97 % OPTIONAL INPUT ARGUMENTS | |
98 % | |
99 % argID (string) Argument identifier string (see below). | |
100 % value (varies) Value for the argument (see below). | |
101 % | |
102 % The optional arguments can be given as 'argID',value -pairs. If an | |
103 % argument is given a value multiple times, the last one is | |
104 % used. The valid IDs and corresponding values are listed below. The values | |
105 % which are unambiguous (marked with '*') can be given without the | |
106 % preceding argID. | |
107 % | |
108 % Below is the list of valid arguments: | |
109 % 'mask' (vector) BMU search mask, size dim x 1. Default is | |
110 % the one in sM (field '.mask') or a vector of | |
111 % ones if only a codebook matrix was given. | |
112 % 'msize' (vector) map grid dimensions. Default is the one | |
113 % in sM (field sM.topol.msize) or | |
114 % 'si = size(sM); msize = si(1:end-1);' | |
115 % if only a codebook matrix was given. | |
116 % 'radius' (vector) neighborhood radius | |
117 % length = 1: radius_ini = radius | |
118 % length = 2: [radius_ini radius_fin] = radius | |
119 % length > 2: the vector gives the neighborhood | |
120 % radius for each step separately | |
121 % trainlen = length(radius) | |
122 % 'radius_ini' (scalar) initial training radius | |
123 % 'radius_fin' (scalar) final training radius | |
124 % 'tracking' (scalar) tracking level: 0, 1 (default), 2 or 3 | |
125 % 0 - estimate time | |
126 % 1 - track time and quantization error | |
127 % 2 - plot quantization error | |
128 % 3 - plot quantization error and two first | |
129 % components | |
130 % 'trainlen' (scalar) training length in epochs | |
131 % 'train' *(struct) train struct, parameters for training. | |
132 % Default parameters, unless specified, | |
133 % are acquired using SOM_TRAIN_STRUCT (this | |
134 % also applies for 'trainlen', 'radius_ini' | |
135 % and 'radius_fin'). | |
136 % 'sTrain', 'som_train' (struct) = 'train' | |
137 % 'neigh' *(string) The used neighborhood function. Default is | |
138 % the one in sM (field '.neigh') or 'gaussian' | |
139 % if only a codebook matrix was given. Other | |
140 % possible values are 'cutgauss', 'ep' and 'bubble'. | |
141 % 'topol' *(struct) topology of the map. Default is the one | |
142 % in sM (field '.topol'). | |
143 % 'sTopol', 'som_topol' (struct) = 'topol' | |
144 % 'lattice' *(string) map lattice. Default is the one in sM | |
145 % (field sM.topol.lattice) or 'rect' | |
146 % if only a codebook matrix was given. | |
147 % 'shape' *(string) map shape. Default is the one in sM | |
148 % (field sM.topol.shape) or 'sheet' | |
149 % if only a codebook matrix was given. | |
150 % 'weights' (vector) weight for each data vector: during training, | |
151 % each data sample is weighted with the corresponding | |
152 % value, for example giving weights = [1 1 2 1] | |
153 % would have the same result as having the third sample | |
154 % appear 2 times in the data | |
155 % | |
156 % OUTPUT ARGUMENTS | |
157 % | |
158 % sM the trained map | |
159 % (struct) if a map struct was given as input argument, a | |
160 % map struct is also returned. The current training | |
161 % is added to the training history (sM.trainhist). | |
162 % The 'neigh' and 'mask' fields of the map struct | |
163 % are updated to match those of the training. | |
164 % (matrix) if a matrix was given as input argument, a matrix | |
165 % is also returned with the same size as the input | |
166 % argument. | |
167 % sT (struct) train struct; information of the accomplished training | |
168 % | |
169 % EXAMPLES | |
170 % | |
171 % Simplest case: | |
172 % sM = som_batchtrain(sM,D); | |
173 % sM = som_batchtrain(sM,sD); | |
174 % | |
175 % To change the tracking level, the 'tracking' argument is specified: | |
176 % sM = som_batchtrain(sM,D,'tracking',3); | |
177 % | |
178 % To change training parameters, the optional arguments 'train','neigh', | |
179 % 'mask','trainlen','radius','radius_ini' and 'radius_fin' are used. | |
180 % sM = som_batchtrain(sM,D,'neigh','cutgauss','trainlen',10,'radius_fin',0); | |
181 % | |
182 % Another way to specify training parameters is to create a train struct: | |
183 % sTrain = som_train_struct(sM,'dlen',size(D,1)); | |
184 % sTrain = som_set(sTrain,'neigh','cutgauss'); | |
185 % sM = som_batchtrain(sM,D,sTrain); | |
186 % | |
187 % By default the neighborhood radius goes linearly from radius_ini to | |
188 % radius_fin. If you want to change this, you can use the 'radius' argument | |
189 % to specify the neighborhood radius for each step separately: | |
190 % sM = som_batchtrain(sM,D,'radius',[5 3 1 1 1 1 0.5 0.5 0.5]); | |
191 % | |
192 % You don't necessarily have to use the map struct, but you can operate | |
193 % directly with codebook matrices. However, in this case you have to | |
194 % specify the topology of the map in the optional arguments. The | |
195 % following commands are identical (M is originally a 200 x dim sized matrix): | |
196 % M = som_batchtrain(M,D,'msize',[20 10],'lattice','hexa','shape','cyl'); | |
197 % or | |
198 % M = som_batchtrain(M,D,'msize',[20 10],'hexa','cyl'); | |
199 % or | |
200 % sT= som_set('som_topol','msize',[20 10],'lattice','hexa','shape','cyl'); | |
201 % M = som_batchtrain(M,D,sT); | |
202 % or | |
203 % M = reshape(M,[20 10 dim]); | |
204 % M = som_batchtrain(M,D,'hexa','cyl'); | |
205 % | |
206 % The som_batchtrain also returns a train struct with information on the | |
207 % accomplished training. This struct is also added to the end of the | |
208 % trainhist field of map struct, in case a map struct was given. | |
209 % [M,sTrain] = som_batchtrain(M,D,'msize',[20 10]); | |
210 % [sM,sTrain] = som_batchtrain(sM,D); % sM.trainhist{end}==sTrain | |
211 % | |
212 % SEE ALSO | |
213 % | |
214 % som_make Initialize and train a SOM using default parameters. | |
215 % som_seqtrain Train SOM with sequential algorithm. | |
216 % som_train_struct Determine default training parameters. | |
217 | |
218 % Copyright (c) 1997-2000 by the SOM toolbox programming team. | |
219 % http://www.cis.hut.fi/projects/somtoolbox/ | |
220 | |
221 % Version 1.0beta juuso 071197 041297 | |
222 % Version 2.0beta juuso 101199 | |
223 | |
224 %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% | |
225 %% Check arguments | |
226 | |
227 error(nargchk(2, Inf, nargin)); % check the number of input arguments | |
228 | |
229 % map | |
230 struct_mode = isstruct(sMap); | |
231 if struct_mode, | |
232 sTopol = sMap.topol; | |
233 else | |
234 orig_size = size(sMap); | |
235 if ndims(sMap) > 2, | |
236 si = size(sMap); dim = si(end); msize = si(1:end-1); | |
237 M = reshape(sMap,[prod(msize) dim]); | |
238 else | |
239 msize = [orig_size(1) 1]; | |
240 dim = orig_size(2); | |
241 end | |
242 sMap = som_map_struct(dim,'msize',msize); | |
243 sTopol = sMap.topol; | |
244 end | |
245 [munits dim] = size(sMap.codebook); | |
246 | |
247 % data | |
248 if isstruct(D), | |
249 data_name = D.name; | |
250 D = D.data; | |
251 else | |
252 data_name = inputname(2); | |
253 end | |
254 nonempty = find(sum(isnan(D),2) < dim); | |
255 D = D(nonempty,:); % remove empty vectors from the data | |
256 [dlen ddim] = size(D); % check input dimension | |
257 if dim ~= ddim, | |
258 error('Map and data input space dimensions disagree.'); | |
259 end | |
260 | |
261 % varargin | |
262 sTrain = som_set('som_train','algorithm','batch','neigh', ... | |
263 sMap.neigh,'mask',sMap.mask,'data_name',data_name); | |
264 radius = []; | |
265 tracking = 1; | |
266 weights = 1; | |
267 | |
268 i=1; | |
269 while i<=length(varargin), | |
270 argok = 1; | |
271 if ischar(varargin{i}), | |
272 switch varargin{i}, | |
273 % argument IDs | |
274 case 'msize', i=i+1; sTopol.msize = varargin{i}; | |
275 case 'lattice', i=i+1; sTopol.lattice = varargin{i}; | |
276 case 'shape', i=i+1; sTopol.shape = varargin{i}; | |
277 case 'mask', i=i+1; sTrain.mask = varargin{i}; | |
278 case 'neigh', i=i+1; sTrain.neigh = varargin{i}; | |
279 case 'trainlen', i=i+1; sTrain.trainlen = varargin{i}; | |
280 case 'tracking', i=i+1; tracking = varargin{i}; | |
281 case 'weights', i=i+1; weights = varargin{i}; | |
282 case 'radius_ini', i=i+1; sTrain.radius_ini = varargin{i}; | |
283 case 'radius_fin', i=i+1; sTrain.radius_fin = varargin{i}; | |
284 case 'radius', | |
285 i=i+1; | |
286 l = length(varargin{i}); | |
287 if l==1, | |
288 sTrain.radius_ini = varargin{i}; | |
289 else | |
290 sTrain.radius_ini = varargin{i}(1); | |
291 sTrain.radius_fin = varargin{i}(end); | |
292 if l>2, radius = varargin{i}; end | |
293 end | |
294 case {'sTrain','train','som_train'}, i=i+1; sTrain = varargin{i}; | |
295 case {'topol','sTopol','som_topol'}, | |
296 i=i+1; | |
297 sTopol = varargin{i}; | |
298 if prod(sTopol.msize) ~= munits, | |
299 error('Given map grid size does not match the codebook size.'); | |
300 end | |
301 % unambiguous values | |
302 case {'hexa','rect'}, sTopol.lattice = varargin{i}; | |
303 case {'sheet','cyl','toroid'}, sTopol.shape = varargin{i}; | |
304 case {'gaussian','cutgauss','ep','bubble'}, sTrain.neigh = varargin{i}; | |
305 otherwise argok=0; | |
306 end | |
307 elseif isstruct(varargin{i}) & isfield(varargin{i},'type'), | |
308 switch varargin{i}(1).type, | |
309 case 'som_topol', | |
310 sTopol = varargin{i}; | |
311 if prod(sTopol.msize) ~= munits, | |
312 error('Given map grid size does not match the codebook size.'); | |
313 end | |
314 case 'som_train', sTrain = varargin{i}; | |
315 otherwise argok=0; | |
316 end | |
317 else | |
318 argok = 0; | |
319 end | |
320 if ~argok, | |
321 disp(['(som_batchtrain) Ignoring invalid argument #' num2str(i+2)]); | |
322 end | |
323 i = i+1; | |
324 end | |
325 | |
326 % take only weights of non-empty vectors | |
327 if length(weights)>dlen, weights = weights(nonempty); end | |
328 | |
329 % trainlen | |
330 if ~isempty(radius), sTrain.trainlen = length(radius); end | |
331 | |
332 % check topology | |
333 if struct_mode, | |
334 if ~strcmp(sTopol.lattice,sMap.topol.lattice) | ... | |
335 ~strcmp(sTopol.shape,sMap.topol.shape) | ... | |
336 any(sTopol.msize ~= sMap.topol.msize), | |
337 warning('Changing the original map topology.'); | |
338 end | |
339 end | |
340 sMap.topol = sTopol; | |
341 | |
342 % complement the training struct | |
343 sTrain = som_train_struct(sTrain,sMap,'dlen',dlen); | |
344 if isempty(sTrain.mask), sTrain.mask = ones(dim,1); end | |
345 | |
346 %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% | |
347 %% initialize | |
348 | |
349 M = sMap.codebook; | |
350 mask = sTrain.mask; | |
351 trainlen = sTrain.trainlen; | |
352 | |
353 % neighborhood radius | |
354 if trainlen==1, | |
355 radius = sTrain.radius_ini; | |
356 elseif length(radius)<=2, | |
357 r0 = sTrain.radius_ini; r1 = sTrain.radius_fin; | |
358 radius = r1 + fliplr((0:(trainlen-1))/(trainlen-1)) * (r0 - r1); | |
359 else | |
360 % nil | |
361 end | |
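% (A worked example of the linear interpolation above, for illustration
% only: with trainlen = 5, radius_ini = 3 and radius_fin = 1,
%   fliplr((0:4)/4)              is [1 0.75 0.5 0.25 0]
%   radius = 1 + fliplr((0:4)/4)*(3-1)  is [3 2.5 2 1.5 1]
% i.e. the radius decreases linearly from radius_ini to radius_fin.)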
362 | |
363 % distance between map units in the output space | |
364 % Since in the case of gaussian and ep neighborhood functions, the | |
365 % equations utilize squares of the unit distances and in the bubble case | |
366 % it doesn't matter which is used, the unit distances and neighborhood | |
367 % radii are squared. | |
368 Ud = som_unit_dists(sTopol); | |
369 Ud = Ud.^2; | |
370 radius = radius.^2; | |
371 % zero neighborhood radius may cause div-by-zero error | |
372 radius(find(radius==0)) = eps; | |
373 | |
374 % The training algorithm involves calculating weighted Euclidean distances | |
375 % to all map units for each data vector. Basically this is done as | |
376 % for i=1:dlen, | |
377 % for j=1:munits, | |
378 % for k=1:dim | |
379 % Dist(j,i) = Dist(j,i) + mask(k) * (D(i,k) - M(j,k))^2; | |
380 % end | |
381 % end | |
382 % end | |
383 % where mask is the weighting vector for distance calculation. However, taking | |
384 % into account that the distance between vectors m and v can be expressed as | |
385 % |m - v|^2 = sum_i ((m_i - v_i)^2) = sum_i (m_i^2 + v_i^2 - 2*m_i*v_i) | |
386 % this can be made much faster by transforming it to a matrix operation: | |
387 % Dist = (M.^2)*mask*ones(1,d) + ones(m,1)*mask'*(D'.^2) - 2*M*diag(mask)*D' | |
388 % Of the involved matrices, several are constant, as the mask and data do | |
389 % not change during training. Therefore they are calculated beforehand. | |
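% As a sanity check, the equivalence of the loop above and the matrix
% expression can be verified numerically on a small random case (a sketch
% for illustration only, with hypothetical variables M0, D0, w; it is not
% executed as part of the training):
%   m = 4; d = 3; n = 5;                          % units, dims, samples
%   M0 = rand(m,d); D0 = rand(n,d); w = rand(d,1);
%   Dist1 = zeros(m,n);                           % loop version
%   for i=1:n, for j=1:m, for k=1:d,
%     Dist1(j,i) = Dist1(j,i) + w(k)*(D0(i,k)-M0(j,k))^2;
%   end, end, end
%   Dist2 = (M0.^2)*w*ones(1,n) + ones(m,1)*w'*(D0'.^2) - 2*M0*diag(w)*D0';
%   max(abs(Dist1(:)-Dist2(:)))                   % ~0, up to rounding error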
390 | |
391 % For the case where there are unknown components in the data, each data | |
392 % vector will have an individual mask vector so that for that unit, the | |
393 % unknown components are not taken into account in distance calculation. | |
394 % In addition all NaN's are changed to zeros so that they don't screw up | |
395 % the matrix multiplications and behave correctly in the updating step. | |
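% (For example, if D(5,2) were NaN, then Known(5,2) = 0 below: component 2
% is excluded from sample 5's distance through W1, and since D(5,2) is set
% to zero it adds nothing to the numerator S or the denominator A of the
% update. The indices are hypothetical, purely for illustration.)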
396 Known = ~isnan(D); | |
397 W1 = (mask*ones(1,dlen)) .* Known'; | |
398 D(find(~Known)) = 0; | |
399 | |
400 % constant matrices | |
401 WD = 2*diag(mask)*D'; % constant matrix | |
402 dconst = ((D.^2)*mask)'; % constant in distance calculation for each data sample | |
403 % W2 = ones(munits,1)*mask'; D2 = (D'.^2); | |
404 | |
405 % initialize tracking | |
406 start = clock; | |
407 qe = zeros(trainlen,1); | |
408 | |
409 %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% | |
410 %% Action | |
411 | |
412 % With the 'blen' parameter you can control the memory consumption | |
413 % of the algorithm, which is in practice directly proportional | |
414 % to munits*blen. If you're having problems with memory, try to | |
415 % set the value of blen lower. | |
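% (Rough illustration, numbers hypothetical: with munits = 1000 and
% blen = 1000 the 'Dist' block below is a 1000-by-1000 double, i.e. about
% 8 MB per block; halving blen roughly halves that figure.)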
416 blen = min(munits,dlen); | |
417 | |
418 % reserve some space | |
419 bmus = zeros(1,dlen); | |
420 ddists = zeros(1,dlen); | |
421 | |
422 for t = 1:trainlen, | |
423 | |
424 % batchy train - this is done one block of data (inds) at a time | |
425 % rather than in a single sweep to save memory consumption. | |
426 % The 'Dist' and 'Hw' matrices have size munits*blen | |
427 % which - if you have a lot of data - would be HUGE if you | |
428 % calculated it all at once. A single-sweep version would | |
429 % look like this: | |
430 % Dist = (M.^2)*W1 - M*WD; %+ W2*D2 | |
431 % [ddists, bmus] = min(Dist); | |
432 % (notice that the W2*D2 term can be ignored since it is constant) | |
433 % This "batchy" version is the same as single-sweep if blen=dlen. | |
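% (Concretely, with hypothetical sizes dlen = 10 and blen = 4, the loop
% below processes the index blocks 1:4, 5:8 and 9:10 in turn.)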
434 i0 = 0; | |
435 while i0+1<=dlen, | |
436 inds = [(i0+1):min(dlen,i0+blen)]; i0 = i0+blen; | |
437 Dist = (M.^2)*W1(:,inds) - M*WD(:,inds); | |
438 [ddists(inds), bmus(inds)] = min(Dist); | |
439 end | |
440 | |
441 % tracking | |
442 if tracking > 0, | |
443 ddists = ddists+dconst; % add the constant term | |
444 ddists(ddists<0) = 0; % rounding errors... | |
445 qe(t) = mean(sqrt(ddists)); | |
446 trackplot(M,D,tracking,start,t,qe); | |
447 end | |
448 | |
449 % neighborhood | |
450 % notice that the elements Ud and radius have been squared! | |
451 % note: 'bubble' matches the original "Batch Map" algorithm | |
452 switch sTrain.neigh, | |
453 case 'bubble', H = (Ud<=radius(t)); | |
454 case 'gaussian', H = exp(-Ud/(2*radius(t))); | |
455 case 'cutgauss', H = exp(-Ud/(2*radius(t))) .* (Ud<=radius(t)); | |
456 case 'ep', H = (1-Ud/radius(t)) .* (Ud<=radius(t)); | |
457 end | |
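% (Worked example, values hypothetical: for a unit with squared distance
% Ud = 1 and squared radius radius(t) = 4, the activations above are
%   'bubble'   -> 1,
%   'gaussian' -> exp(-1/8) ~ 0.88,
%   'cutgauss' -> exp(-1/8) ~ 0.88,
%   'ep'       -> 1 - 1/4   = 0.75.)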
458 | |
459 % update | |
460 | |
461 % In principle the updating step goes like this: replace each map unit | |
462 % by the average of the data vectors that were in its neighborhood. | |
463 % The contribution, or activation, of data vectors in the mean can | |
464 % be varied with the neighborhood function. This activation is given | |
465 % by matrix H. So, for each map unit the new weight vector is | |
466 % | |
467 % m = sum_i (h_i * d_i) / sum_i (h_i), | |
468 % | |
469 % where i denotes the index of the data vector. Since the values of | |
470 % neighborhood function h_i are the same for all data vectors belonging to | |
471 % the Voronoi set of the same map unit, the calculation is actually done | |
472 % by first calculating a partition matrix P with elements p_ij=1 if the | |
473 % BMU of data vector j is i. | |
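% (Toy illustration with hypothetical numbers: if munits = 3, dlen = 4,
% bmus = [1 3 3 2] and unit sample weights, then
%   P = sparse(bmus,1:4,1,3,4) = [ 1 0 0 0
%                                  0 0 0 1
%                                  0 1 1 0 ]
% so row i of P*D is the sum of the data vectors whose BMU is unit i.)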
474 | |
475 P = sparse(bmus,[1:dlen],weights,munits,dlen); | |
476 | |
477 % Then the sum of vectors in each Voronoi set is calculated (P*D) and the | |
478 % neighborhood is taken into account by calculating a weighted sum of the | |
479 % Voronoi sums (H*(P*D)). The "activation" matrix A is the denominator of the | |
480 % equation above. | |
481 | |
482 S = H*(P*D); | |
483 A = H*(P*Known); | |
484 | |
485 % If you'd rather make this without using the Voronoi sets try the following: | |
486 % Hi = H(:,bmus); | |
487 % S = Hi * D; % "sum_i (h_i * d_i)" | |
488 % A = Hi * Known; % "sum_i (h_i)" | |
489 % The bad news is that the matrix Hi has size [munits x dlen]... | |
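% (With hypothetical sizes munits = 1000 and dlen = 100000, Hi would hold
% 1e8 doubles, i.e. roughly 800 MB, which is why the Voronoi-set route
% above is used instead.)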
490 | |
491 % only update units for which the "activation" is nonzero | |
492 nonzero = find(A > 0); | |
493 M(nonzero) = S(nonzero) ./ A(nonzero); | |
494 | |
495 end; % for t = 1:trainlen | |
496 | |
497 %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% | |
498 %% Build / clean up the return arguments | |
499 | |
500 % tracking | |
501 if tracking > 0, fprintf(1,'\n'); end | |
502 | |
503 % update structures | |
504 sTrain = som_set(sTrain,'time',datestr(now,0)); | |
505 if struct_mode, | |
506 sMap = som_set(sMap,'codebook',M,'mask',sTrain.mask,'neigh',sTrain.neigh); | |
507 tl = length(sMap.trainhist); | |
508 sMap.trainhist(tl+1) = sTrain; | |
509 else | |
510 sMap = reshape(M,orig_size); | |
511 end | |
512 | |
513 return; | |
514 | |
515 | |
516 %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% | |
517 %% subfunctions | |
518 | |
519 %%%%%%%% | |
520 function [] = trackplot(M,D,tracking,start,n,qe) | |
521 | |
522 l = length(qe); | |
523 elap_t = etime(clock,start); | |
524 tot_t = elap_t*l/n; | |
525 fprintf(1,'\rTraining: %3.0f/ %3.0f s',elap_t,tot_t) | |
526 switch tracking | |
527 case 1, | |
528 case 2, | |
529 plot(1:n,qe(1:n),(n+1):l,qe((n+1):l)) | |
530 title('Quantization error after each epoch'); | |
531 drawnow | |
532 otherwise, | |
533 subplot(2,1,1), plot(1:n,qe(1:n),(n+1):l,qe((n+1):l)) | |
534 title('Quantization error after each epoch'); | |
535 subplot(2,1,2), plot(M(:,1),M(:,2),'ro',D(:,1),D(:,2),'b+'); | |
536 title('First two components of map units (o) and data vectors (+)'); | |
537 drawnow | |
538 end | |
539 % end of trackplot |