toolboxes/FullBNT-1.0.7/netlab3.3/rbfgrad.m @ 0:e9a9cd732c1e tip

first hg version after svn
author wolffd
date Tue, 10 Feb 2015 15:05:51 +0000
function [g, gdata, gprior] = rbfgrad(net, x, t)
%RBFGRAD Evaluate gradient of error function for RBF network.
%
% Description
% G = RBFGRAD(NET, X, T) takes a network data structure NET together
% with a matrix X of input vectors and a matrix T of target vectors,
% and evaluates the gradient G of the error function with respect to
% the network weights (i.e. including the hidden unit parameters). The
% error function is the sum of squares. Each row of X corresponds to one
% input vector and each row of T contains the corresponding target
% vector. If the output function is 'NEUROSCALE' then the gradient is
% only computed for the output layer weights and biases.
%
% [G, GDATA, GPRIOR] = RBFGRAD(NET, X, T) also returns separately the
% data and prior contributions to the gradient. In the case of multiple
% groups in the prior, GPRIOR is a matrix with a row for each group and
% a column for each weight parameter.
%
% See also
% RBF, RBFFWD, RBFERR, RBFPAK, RBFUNPAK, RBFBKP
%
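% Example
% A minimal sketch of a typical call, assuming a network created with
% Netlab's RBF function:
%
%       net = rbf(2, 5, 1, 'gaussian'); % 2 inputs, 5 hidden units, 1 output
%       x = randn(20, 2);               % 20 input vectors
%       t = randn(20, 1);               % corresponding target vectors
%       g = rbfgrad(net, x, t);         % gradient of sum-of-squares error
%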

% Copyright (c) Ian T Nabney (1996-2001)

% Check arguments for consistency
switch net.outfn
  case 'linear'
    errstring = consist(net, 'rbf', x, t);
  case 'neuroscale'
    errstring = consist(net, 'rbf', x);
  otherwise
    error(['Unknown output function ', net.outfn]);
end
if ~isempty(errstring)
  error(errstring);
end

ndata = size(x, 1);

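% Forward propagate: following Netlab's RBFFWD, y holds the network
% outputs, z the hidden unit activations and n2 the squared distances
% between each input vector and each centre; z and n2 are reused by
% RBFBKP below.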
[y, z, n2] = rbffwd(net, x);

switch net.outfn
  case 'linear'

    % Sum squared error at output units
    delout = y - t;

    % Backpropagate through the network, then let GBAYES add the
    % gradient of any prior (regularisation) terms
    gdata = rbfbkp(net, x, z, n2, delout);
    [g, gdata, gprior] = gbayes(net, gdata);

  case 'neuroscale'
    % Compute the error gradient with respect to outputs
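    % The code below implements the gradient of the NEUROSCALE stress
    % (assuming, as in Netlab's documentation, E = sum_{i<j} (d*_ij - d_ij)^2,
    % where d*_ij are the target inter-point distances in t and d_ij the
    % distances between output points):
    %   dE/dy(j,:) = 2 * sum_i D(i,j) * (y(i,:) - y(j,:)),
    %   with D(i,j) = (d*_ij - d_ij) / d_ij.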
    y_dist = sqrt(dist2(y, y));
    % Adding the identity matrix to the denominator guards against
    % division by zero on the (zero-distance) diagonal
    D = (t - y_dist)./(y_dist + diag(ones(ndata, 1)));
    temp = y';
    gradient = 2.*sum(kron(D, ones(1, net.nout)) .* ...
      (repmat(y, 1, ndata) - repmat((temp(:))', ndata, 1)), 1);
    gradient = (reshape(gradient, net.nout, ndata))';
    % Compute the error gradient
    gdata = rbfbkp(net, x, z, n2, gradient);
    [g, gdata, gprior] = gbayes(net, gdata);
  otherwise
    error(['Unknown output function ', net.outfn]);
end