camir-aes2014: toolboxes/FullBNT-1.0.7/netlab3.3/rbf.m @ 0:e9a9cd732c1e (tip)

changeset: first hg version after svn
author:    wolffd
date:      Tue, 10 Feb 2015 15:05:51 +0000

function net = rbf(nin, nhidden, nout, rbfunc, outfunc, prior, beta)
%RBF Creates an RBF network with specified architecture
%
% Description
% NET = RBF(NIN, NHIDDEN, NOUT, RBFUNC) constructs and initialises a
% radial basis function network returning a data structure NET. The
% weights are all initialised with a zero mean, unit variance normal
% distribution, with the exception of the variances, which are set to
% one. This makes use of the Matlab function RANDN and so the seed for
% the random weight initialization can be set using RANDN('STATE', S)
% where S is the seed value. The activation functions are defined in
% terms of the distance between the data point and the corresponding
% centre. Note that the functions are computed to a convenient
% constant multiple: for example, the Gaussian is not normalised.
% (Normalisation is not needed as the function outputs are linearly
% combined in the next layer.)
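%
% For example (an editor's sketch; this call does not appear in the
% original help text), a network with 2 inputs, 5 Gaussian hidden units
% and one output, with a reproducible initialisation, could be built as
%
%    randn('state', 42);
%    net = rbf(2, 5, 1, 'gaussian');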
%
% The fields in NET are
%   type = 'rbf'
%   nin = number of inputs
%   nhidden = number of hidden units
%   nout = number of outputs
%   nwts = total number of weights and biases
%   actfn = string defining hidden unit activation function:
%     'gaussian' for a radially symmetric Gaussian function.
%     'tps' for r^2 log r, the thin plate spline function.
%     'r4logr' for r^4 log r.
%   outfn = string defining output error function:
%     'linear' for linear outputs (default) and SoS error.
%     'neuroscale' for Sammon stress measure.
%   c = centres
%   wi = squared widths (null for 'r4logr' and 'tps')
%   w2 = second layer weight matrix
%   b2 = second layer bias vector
%
% NET = RBF(NIN, NHIDDEN, NOUT, RBFUNC, OUTFUNC) allows the user to
% specify the type of error function to be used. The field OUTFN is
% set to the value of this string. Linear outputs (for regression
% problems) and Neuroscale outputs (for topographic mappings) are
% supported.
%
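% For instance (again an illustrative sketch, not part of the original
% help), a thin plate spline network for a topographic mapping to two
% dimensions could be created with
%
%    net = rbf(10, 8, 2, 'tps', 'neuroscale');
%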
% NET = RBF(NIN, NHIDDEN, NOUT, RBFUNC, OUTFUNC, PRIOR, BETA), in which
% PRIOR is a scalar, allows the field NET.ALPHA in the data structure
% NET to be set, corresponding to a zero-mean isotropic Gaussian prior
% whose inverse variance has value PRIOR. Alternatively, PRIOR can
% consist of a data structure with fields ALPHA and INDEX, allowing
% individual Gaussian priors to be set over groups of weights in the
% network. Here ALPHA is a column vector in which each element
% corresponds to a separate group of weights, which need not be
% mutually exclusive. The membership of the groups is defined by the
% matrix INDEX, in which the columns correspond to the elements of
% ALPHA. Each column has one element for each weight in the network,
% in the order defined by the function RBFPAK, and each element is 1
% or 0 according to whether the weight is a member of the corresponding
% group or not. A utility function RBFPRIOR is provided to help in
% setting up the PRIOR data structure.
%
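% Following the scalar form described above (an editor's illustration,
% not from the original help), a shared isotropic prior with inverse
% variance 0.01 would be set with
%
%    net = rbf(2, 5, 1, 'gaussian', 'linear', 0.01);
%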
% NET = RBF(NIN, NHIDDEN, NOUT, RBFUNC, OUTFUNC, PRIOR, BETA) also sets
% the additional field NET.BETA in the data structure NET, where BETA
% corresponds to the inverse noise variance.
%
% See also
% RBFERR, RBFFWD, RBFGRAD, RBFPAK, RBFTRAIN, RBFUNPAK
%

% Copyright (c) Ian T Nabney (1996-2001)

net.type = 'rbf';
net.nin = nin;
net.nhidden = nhidden;
net.nout = nout;

% Check that function is an allowed type
actfns = {'gaussian', 'tps', 'r4logr'};
outfns = {'linear', 'neuroscale'};
if ~any(strcmp(rbfunc, actfns))
  error('Undefined activation function.')
else
  net.actfn = rbfunc;
end
if nargin <= 4
  net.outfn = outfns{1};
elseif ~any(strcmp(outfunc, outfns))
  error('Undefined output function.')
else
  net.outfn = outfunc;
end

% Assume each function has a centre and a single width parameter, and that
% hidden layer to output weights include a bias. Only the Gaussian function
% requires a width
net.nwts = nin*nhidden + (nhidden + 1)*nout;
if strcmp(rbfunc, 'gaussian')
  % Extra weights for width parameters
  net.nwts = net.nwts + nhidden;
end
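% Worked example (editor's note, not in the original source): with nin = 2,
% nhidden = 5, nout = 1 and 'gaussian' units, this gives
% nwts = 2*5 + (5+1)*1 + 5 = 21: ten centre coordinates, six second-layer
% weights and biases, and one width per hidden unit.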

if nargin > 5
  if isstruct(prior)
    net.alpha = prior.alpha;
    net.index = prior.index;
  elseif isscalar(prior)
    net.alpha = prior;
  else
    error('prior must be a scalar or a structure');
  end
  if nargin > 6
    net.beta = beta;
  end
end

% Draw all weights from a zero-mean, unit-variance Gaussian and unpack
% them into the network structure
w = randn(1, net.nwts);
net = rbfunpak(net, w);

% Make widths equal to one
if strcmp(rbfunc, 'gaussian')
  net.wi = ones(1, nhidden);
end

if strcmp(net.outfn, 'neuroscale')
  net.mask = rbfprior(rbfunc, nin, nhidden, nout);
end