function g = rbfbkp(net, x, z, n2, deltas)
%RBFBKP Backpropagate gradient of error function for RBF network.
%
% Description
% G = RBFBKP(NET, X, Z, N2, DELTAS) takes a network data structure NET
% together with a matrix X of input vectors, a matrix Z of hidden unit
% activations, a matrix N2 of the squared distances between centres and
% inputs, and a matrix DELTAS of the gradient of the error function
% with respect to the values of the output units (i.e. the summed
% inputs to the output units, before the activation function is
% applied). The return value is the gradient G of the error function
% with respect to the network weights. Each row of X corresponds to one
% input vector.
%
% This function is provided so that the common backpropagation
% algorithm can be used by RBF network models to compute gradients for
% the output values (in RBFDERIV) as well as for standard error
% functions.
%
% See also
% RBF, RBFGRAD, RBFDERIV
%

% Copyright (c) Ian T Nabney (1996-2001)

% Evaluate second-layer gradients.
gw2 = z'*deltas;
gb2 = sum(deltas, 1);

% Back-propagate deltas to the hidden units.
delhid = deltas*net.w2';

gc = zeros(net.nhidden, net.nin);
ndata = size(x, 1);
t1 = ones(ndata, 1);
t2 = ones(1, net.nin);
% Switch on activation function type
switch net.actfn

  case 'gaussian'   % Gaussian
    delhid = delhid.*z;
    % A loop seems essential, so do it with the shortest index vector
    if net.nin < net.nhidden
      for i = 1:net.nin
        gc(:,i) = (sum(((x(:,i)*ones(1, net.nhidden)) - ...
          (ones(ndata, 1)*net.c(:,i)')).*delhid, 1)./net.wi)';
      end
    else
      for i = 1:net.nhidden
        % Centre gradient: (x - c)/wi weighted by the hidden-unit deltas
        gc(i,:) = sum(((x - t1*net.c(i,:))./net.wi(i)).*(delhid(:,i)*t2), 1);
      end
    end
    % Gradient with respect to the squared widths
    gwi = sum((n2.*delhid)./(2.*(ones(ndata, 1)*(net.wi.^2))), 1);

  case 'tps'        % Thin plate spline activation function
    delhid = delhid.*(1 + log(n2 + (n2==0)));
    for i = 1:net.nhidden
      gc(i,:) = sum(2.*(t1*net.c(i,:) - x).*(delhid(:,i)*t2), 1);
    end
    % Widths are not adjustable in this model
    gwi = [];

  case 'r4logr'     % r^4 log r activation function
    delhid = delhid.*(n2.*(1 + 2.*log(n2 + (n2==0))));
    for i = 1:net.nhidden
      gc(i,:) = sum(2.*(t1*net.c(i,:) - x).*(delhid(:,i)*t2), 1);
    end
    % Widths are not adjustable in this model
    gwi = [];

  otherwise
    error('Unknown activation function in rbfbkp')
end

g = [gc(:)', gwi, gw2(:)', gb2];
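
% Usage sketch (illustrative, not part of the original file): assuming the
% standard Netlab calls RBF and RBFFWD, and a sum-of-squares error with
% linear outputs so that the output deltas are simply Y - T, a gradient
% computation in the style of RBFGRAD would look like:
%
%   net = rbf(2, 5, 1, 'gaussian');      % 2 inputs, 5 centres, 1 output
%   [y, z, n2] = rbffwd(net, x);         % forward pass on data matrix x
%   deltas = y - t;                      % dE/d(summed output inputs)
%   g = rbfbkp(net, x, z, n2, deltas);   % gradient w.r.t. all weights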