annotate toolboxes/MIRtoolbox1.3.2/somtoolbox/neural_gas.m @ 0:e9a9cd732c1e tip

first hg version after svn
author wolffd
date Tue, 10 Feb 2015 15:05:51 +0000
parents
children
rev   line source
function [Neurons] = neural_gas(D,n,epochs,alpha0,lambda0)

%NEURAL_GAS Quantizes the data space using the neural gas algorithm.
%
% Neurons = neural_gas(D, n, epochs, [alpha0], [lambda0])
%
%  C = neural_gas(D,50,10);
%  sM = som_map_struct(sD);
%  sM.codebook = neural_gas(sD,size(sM.codebook,1),10);
%
%  Input and output arguments ([]'s are optional):
%   D          (matrix) the data matrix, size dlen x dim
%              (struct) a data struct
%   n          (scalar) the number of neurons
%   epochs     (scalar) the number of training epochs (the number of
%                       training steps is dlen*epochs)
%   [alpha0]   (scalar) initial step size, 0.5 by default
%   [lambda0]  (scalar) initial decay constant, n/2 by default
%
%   Neurons    (matrix) the neuron matrix, size n x dim
%
% Missing values (NaNs) in D are handled: for each sample only the
% known components take part in the distance ranking and the update.
%
% See also SOM_MAKE, KMEANS.

% References:
%  T.M.Martinetz, S.G.Berkovich, and K.J.Schulten. "Neural-gas" network
%  for vector quantization and its application to time-series prediction.
%  IEEE Transactions on Neural Networks, 4(4):558-569, 1993.

% Contributed to SOM Toolbox vs2, February 2nd, 2000 by Juha Vesanto
% Copyright (c) by Juha Vesanto
% http://www.cis.hut.fi/projects/somtoolbox/

% juuso 101297 020200

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% Check arguments and initialize

error(nargchk(3, 5, nargin));  % check the number of input arguments

if isstruct(D), D = D.data; end
[dlen,dim] = size(D);
Neurons = (rand(n,dim)-0.5)*10e-5; % small random initial codebook
train_len = epochs*dlen;

% short-circuit || guarantees alpha0/lambda0 are never evaluated
% when they were not given (nargin too small)
if nargin<4 || isempty(alpha0) || isnan(alpha0), alpha0 = 0.5; end
if nargin<5 || isempty(lambda0) || isnan(lambda0), lambda0 = n/2; end

% random sample order
rand('state',sum(100*clock));
sample_inds = ceil(dlen*rand(train_len,1));

% decay constant: exponential decay from lambda0 to 0.01
lambda = lambda0 * (0.01/lambda0).^([0:(train_len-1)]/train_len);

% step size: exponential decay from alpha0 to 0.005
alpha = alpha0 * (0.005/alpha0).^([0:(train_len-1)]/train_len);

ranking = zeros(1,n);  % preallocated rank vector (0 = BMU)

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% Action

for i=1:train_len,

  % sample vector
  x = D(sample_inds(i),:);            % sample vector for this step
  known = ~isnan(x);                  % logical mask of its known components
  nknown = sum(known);                % number of known components
  X = x(ones(n,1),known);             % sample replicated to n rows

  % neighborhood ranking
  Dx = Neurons(:,known) - X;          % difference between sample and all units
  % squared distance over the known components only; sum along rows so
  % the result is n x 1 regardless of how many components are missing
  % (the original (Dx.^2)*known' errored out whenever x contained NaNs)
  [qerrs, inds] = sort(sum(Dx.^2,2)); % 1-BMU, 2-BMU, etc.
  ranking(inds) = [0:(n-1)];
  h = exp(-ranking/lambda(i));        % neighborhood function of the rank
  % replicate h to n x nknown; must use the count of known components,
  % not length(known) (= dim), or the shapes disagree when NaNs occur
  H = h(ones(nknown,1),:)';

  % update only the known components of each neuron
  Neurons(:,known) = Neurons(:,known) + alpha(i)*H.*(X - Neurons(:,known));

  % track
  fprintf(1,'%d / %d \r',i,train_len);
  if 0 & mod(i,50) == 0,  % debug visualization, disabled
    hold off, plot3(D(:,1),D(:,2),D(:,3),'bo')
    hold on, plot3(Neurons(:,1),Neurons(:,2),Neurons(:,3),'r+')
    drawnow
  end
end

fprintf(1,'\n');

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%