function [h, hdata] = glmhess(net, x, t, hdata)
%GLMHESS Evaluate the Hessian matrix for a generalised linear model.
%
%	Description
%	H = GLMHESS(NET, X, T) takes a GLM network data structure NET, a
%	matrix X of input values, and a matrix T of target values and returns
%	the full Hessian matrix H corresponding to the second derivatives of
%	the negative log posterior distribution, evaluated for the current
%	weight and bias values as defined by NET. Note that the target data
%	is not required in the calculation, but is included to make the
%	interface uniform with NETHESS.  For linear and logistic outputs, the
%	computation is very simple and is done (in effect) in one line in
%	GLMTRAIN.
%
%	[H, HDATA] = GLMHESS(NET, X, T) returns both the Hessian matrix H and
%	the contribution HDATA arising from the data dependent term in the
%	Hessian.
%
%	H = GLMHESS(NET, X, T, HDATA) takes a network data structure NET, a
%	matrix X of input values, and a matrix T of target values, together
%	with the contribution HDATA arising from the data dependent term in
%	the Hessian, and returns the full Hessian matrix H corresponding to
%	the second derivatives of the negative log posterior distribution.
%	This version saves computation time if HDATA has already been
%	evaluated for the current weight and bias values.
%
%	See also
%	GLM, GLMTRAIN, HESSCHEK, NETHESS
%

%	Copyright (c) Ian T Nabney (1996-2001)

% Check arguments for consistency
errstring = consist(net, 'glm', x, t);
if ~isempty(errstring)
  error(errstring);
end

ndata = size(x, 1);
nparams = net.nwts;
nout = net.nout;
p = glmfwd(net, x);        % Network outputs for current weights
inputs = [x ones(ndata, 1)];

% Only recompute the data-dependent term when it was not supplied
if nargin == 3
  hdata = zeros(nparams);  % Full Hessian matrix
  % Calculate data component of Hessian
  switch net.outfn

    case 'linear'
      % No weighting function here: the Hessian block is the same for
      % every output, so compute it once and place it nout times.
      out_hess = [x ones(ndata, 1)]' * [x ones(ndata, 1)];
      for j = 1:nout
        hdata = rearrange_hess(net, j, out_hess, hdata);
      end

    case 'logistic'
      % Each output is independent; weight the design matrix rows by
      % the derivative of the logistic link, y(1-y), per output.
      e = ones(1, net.nin+1);
      link_deriv = p .* (1 - p);
      out_hess = zeros(net.nin+1);
      for j = 1:nout
        inputs = [x ones(ndata, 1)] .* (sqrt(link_deriv(:, j)) * e);
        out_hess = inputs' * inputs;   % Hessian for this output
        hdata = rearrange_hess(net, j, out_hess, hdata);
      end

    case 'softmax'
      % Outputs are coupled, so accumulate a full per-example Hessian.
      bb_start = nparams - nout + 1;   % Start of bias weights block
      ex_hess = zeros(nparams);        % Contribution from single example
      for m = 1:ndata
        X = x(m, :)' * x(m, :);
        % a = diag(y) - y*y' is the derivative of the softmax outputs
        a = diag(p(m, :)) - ((p(m, :)') * p(m, :));
        ex_hess(1:nparams-nout, 1:nparams-nout) = kron(a, X);
        ex_hess(bb_start:nparams, bb_start:nparams) = ...
          a .* ones(net.nout, net.nout);
        % Cross terms between input weights and bias weights
        temp = kron(a, x(m, :));
        ex_hess(bb_start:nparams, 1:nparams-nout) = temp;
        ex_hess(1:nparams-nout, bb_start:nparams) = temp';
        hdata = hdata + ex_hess;
      end

    otherwise
      error(['Unknown activation function ', net.outfn]);
  end
end

% Add the prior (regularisation) term to get the full Hessian
[h, hdata] = hbayes(net, hdata);

function hdata = rearrange_hess(net, j, out_hess, hdata)

% Because all the biases come after all the input weights,
% we have to rearrange the blocks that make up the network Hessian.
% This function assumes that we are on the jth output and that all outputs
% are independent.

bb_start = net.nwts - net.nout + 1;  % Start of bias weights block
ob_start = 1 + (j-1)*net.nin;        % Start of weight block for jth output
ob_end = j*net.nin;                  % End of weight block for jth output
b_index = bb_start + (j-1);          % Index of bias weight
% Put input weight block in right place
hdata(ob_start:ob_end, ob_start:ob_end) = out_hess(1:net.nin, 1:net.nin);
% Put second derivative of bias weight in right place
hdata(b_index, b_index) = out_hess(net.nin+1, net.nin+1);
% Put cross terms (input weight v bias weight) in right place
hdata(b_index, ob_start:ob_end) = out_hess(net.nin+1, 1:net.nin);
hdata(ob_start:ob_end, b_index) = out_hess(1:net.nin, net.nin+1);

return