function [h, hdata] = glmhess(net, x, t, hdata)
%GLMHESS Evaluate the Hessian matrix for a generalised linear model.
%
% Description
% H = GLMHESS(NET, X, T) takes a GLM network data structure NET, a
% matrix X of input values, and a matrix T of target values, and returns
% the full Hessian matrix H corresponding to the second derivatives of
% the negative log posterior distribution, evaluated for the current
% weight and bias values as defined by NET. Note that the target data
% is not required in the calculation, but is included to make the
% interface uniform with NETHESS. For linear and logistic outputs, the
% computation is very simple and is done (in effect) in one line in
% GLMTRAIN.
%
% [H, HDATA] = GLMHESS(NET, X, T) returns both the Hessian matrix H and
% the contribution HDATA arising from the data-dependent term in the
% Hessian.
%
% H = GLMHESS(NET, X, T, HDATA) takes a network data structure NET, a
% matrix X of input values, and a matrix T of target values, together
% with the contribution HDATA arising from the data-dependent term in
% the Hessian, and returns the full Hessian matrix H corresponding to
% the second derivatives of the negative log posterior distribution.
% This version saves computation time if HDATA has already been
% evaluated for the current weight and bias values.
%
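% Example
% A minimal usage sketch (illustrative only): it assumes X and T are data
% and target matrices of dimensions compatible with the network, and uses
% a logistic output purely as an example.
%
%   net = glm(size(X, 2), size(T, 2), 'logistic');
%   [h, hdata] = glmhess(net, X, T);   % full Hessian and data-dependent term
%   h2 = glmhess(net, X, T, hdata);    % reuse HDATA to avoid recomputation
%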
% See also
% GLM, GLMTRAIN, HESSCHEK, NETHESS
%

% Copyright (c) Ian T Nabney (1996-2001)

% Check arguments for consistency
errstring = consist(net, 'glm', x, t);
if ~isempty(errstring)
  error(errstring);
end

ndata = size(x, 1);
nparams = net.nwts;
nout = net.nout;
p = glmfwd(net, x);
inputs = [x ones(ndata, 1)];

if nargin == 3
  hdata = zeros(nparams);   % Full Hessian matrix
  % Calculate data component of Hessian
  switch net.outfn

    case 'linear'
      % No weighting function here
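      % For linear outputs the data block is the same for every output:
      % Phi'*Phi with Phi = [x ones(ndata,1)], so it is computed once and
      % copied into place for each output below.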
|
wolffd@0
|
52 out_hess = [x ones(ndata, 1)]'*[x ones(ndata, 1)];
|
wolffd@0
|
53 for j = 1:nout
|
wolffd@0
|
54 hdata = rearrange_hess(net, j, out_hess, hdata);
|
wolffd@0
|
55 end
|
wolffd@0
|
56 case 'logistic'
|
wolffd@0
|
57 % Each output is independent
|
wolffd@0
|
58 e = ones(1, net.nin+1);
|
wolffd@0
|
59 link_deriv = p.*(1-p);
|
wolffd@0
|
60 out_hess = zeros(net.nin+1);
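      % The data block for output j is Phi'*D_j*Phi, where Phi = [x ones(ndata,1)]
      % and D_j = diag(p(:,j).*(1-p(:,j))).  Scaling the rows of Phi by
      % sqrt(link_deriv(:,j)) lets this be formed as a single product inputs'*inputs.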
|
wolffd@0
|
61 for j = 1:nout
|
wolffd@0
|
62 inputs = [x ones(ndata, 1)].*(sqrt(link_deriv(:,j))*e);
|
wolffd@0
|
63 out_hess = inputs'*inputs; % Hessian for this output
|
wolffd@0
|
64 hdata = rearrange_hess(net, j, out_hess, hdata);
|
wolffd@0
|
65 end
|
wolffd@0
|
66
|
wolffd@0
|
67 case 'softmax'
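      % For softmax outputs the outputs are coupled, so the Hessian is built up
      % pattern by pattern: for pattern m the weight block contribution is
      % kron(A, X) with A = diag(p(m,:)) - p(m,:)'*p(m,:) and X = x(m,:)'*x(m,:),
      % together with the corresponding bias and cross-term blocks.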
      bb_start = nparams - nout + 1;   % Start of bias weights block
      ex_hess = zeros(nparams);        % Contribution to Hessian from single example
      for m = 1:ndata
        X = x(m,:)'*x(m,:);
        a = diag(p(m,:)) - (p(m,:)'*p(m,:));
        ex_hess(1:nparams-nout, 1:nparams-nout) = kron(a, X);
        ex_hess(bb_start:nparams, bb_start:nparams) = a;   % bias-bias block
        temp = kron(a, x(m,:));
        ex_hess(bb_start:nparams, 1:nparams-nout) = temp;
        ex_hess(1:nparams-nout, bb_start:nparams) = temp';
        hdata = hdata + ex_hess;
      end
    otherwise
      error(['Unknown activation function ', net.outfn]);
  end
end

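% hbayes adds the contribution from any Gaussian priors on the weights (the
% regularisation term), giving the Hessian of the negative log posterior.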
[h, hdata] = hbayes(net, hdata);

function hdata = rearrange_hess(net, j, out_hess, hdata)

% Because all the biases come after all the input weights,
% we have to rearrange the blocks that make up the network Hessian.
% This function assumes that we are on the jth output and that all outputs
% are independent.
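% For instance, with net.nin = 3 and net.nout = 2 the parameter vector is
% ordered [w11 w21 w31 w12 w22 w32 b1 b2] (w_ij is the weight from input i
% to output j), so for j = 2 the weight block occupies rows/columns 4:6 and
% the bias weight sits at index 8.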

bb_start = net.nwts - net.nout + 1;   % Start of bias weights block
ob_start = 1 + (j-1)*net.nin;         % Start of weight block for jth output
ob_end = j*net.nin;                   % End of weight block for jth output
b_index = bb_start + (j-1);           % Index of bias weight
% Put input weight block in right place
hdata(ob_start:ob_end, ob_start:ob_end) = out_hess(1:net.nin, 1:net.nin);
% Put second derivative of bias weight in right place
hdata(b_index, b_index) = out_hess(net.nin+1, net.nin+1);
% Put cross terms (input weight v bias weight) in right place
hdata(b_index, ob_start:ob_end) = out_hess(net.nin+1, 1:net.nin);
hdata(ob_start:ob_end, b_index) = out_hess(1:net.nin, net.nin+1);

return