Mercurial > hg > camir-aes2014
comparison toolboxes/FullBNT-1.0.7/netlab3.3/rbfgrad.m @ 0:e9a9cd732c1e tip
first hg version after svn
author | wolffd |
---|---|
date | Tue, 10 Feb 2015 15:05:51 +0000 |
parents | |
children |
comparison
equal | deleted | inserted | replaced
-1:000000000000 | 0:e9a9cd732c1e |
---|---|
function [g, gdata, gprior] = rbfgrad(net, x, t)
%RBFGRAD Evaluate gradient of error function for RBF network.
%
%	Description
%	G = RBFGRAD(NET, X, T) takes a network data structure NET together
%	with a matrix X of input vectors and a matrix T of target vectors,
%	and evaluates the gradient G of the error function with respect to
%	the network weights (including the hidden unit parameters).  The
%	error function is sum of squares.  Each row of X corresponds to one
%	input vector and each row of T contains the corresponding target
%	vector.  If the output function is 'NEUROSCALE' then the gradient is
%	only computed for the output layer weights and biases.
%
%	[G, GDATA, GPRIOR] = RBFGRAD(NET, X, T) also returns separately the
%	data and prior contributions to the gradient.  In the case of
%	multiple groups in the prior, GPRIOR is a matrix with a row for each
%	group and a column for each weight parameter.
%
%	See also
%	RBF, RBFFWD, RBFERR, RBFPAK, RBFUNPAK, RBFBKP
%

%	Copyright (c) Ian T Nabney (1996-2001)

% Validate the network/data combination for the selected output function.
if strcmp(net.outfn, 'linear')
  errstring = consist(net, 'rbf', x, t);
elseif strcmp(net.outfn, 'neuroscale')
  errstring = consist(net, 'rbf', x);
else
  error(['Unknown output function ', net.outfn]);
end
if ~isempty(errstring)
  error(errstring);
end

npts = size(x, 1);

% Forward pass: outputs, hidden unit activations and squared distances.
[y, z, n2] = rbffwd(net, x);

if strcmp(net.outfn, 'linear')
  % Derivative of the sum-of-squares error at the output units.
  out_deriv = y - t;

  % Back-propagate to the weights, then fold in the prior contribution.
  gdata = rbfbkp(net, x, z, n2, out_deriv);
  [g, gdata, gprior] = gbayes(net, gdata);
elseif strcmp(net.outfn, 'neuroscale')
  % Error gradient with respect to the network outputs: stress between
  % the target inter-point distances T and the output-space distances.
  interdist = sqrt(dist2(y, y));
  % An identity matrix is added to the denominator so that the zero
  % self-distances on the diagonal do not cause division by zero.
  delta = (t - interdist)./(interdist + diag(ones(npts, 1)));
  yt = y';
  egrad = 2.*sum(kron(delta, ones(1, net.nout)) .* ...
    (repmat(y, 1, npts) - repmat((yt(:))', npts, 1)), 1);
  egrad = (reshape(egrad, net.nout, npts))';
  % Back-propagate the output-space gradient through the network.
  gdata = rbfbkp(net, x, z, n2, egrad);
  [g, gdata, gprior] = gbayes(net, gdata);
else
  error(['Unknown output function ', net.outfn]);
end