toolboxes/FullBNT-1.0.7/netlabKPM/glmerr_weighted.m @ 0:e9a9cd732c1e (tip)

changeset: first hg version after svn
author:    wolffd
date:      Tue, 10 Feb 2015 15:05:51 +0000
function [e, edata, eprior, y, a] = glmerr_weighted(net, x, t, eso_w)
%GLMERR_WEIGHTED Evaluate weighted error function for generalized linear model.
%
% Description
% E = GLMERR_WEIGHTED(NET, X, T, ESO_W) takes a generalized linear model
% data structure NET together with a matrix X of input vectors, a matrix
% T of target vectors and a column vector ESO_W of pattern weights, and
% evaluates the weighted error function E. The choice of error function
% corresponds to the output unit activation function. Each row of X
% corresponds to one input vector and each row of T corresponds to one
% target vector.
%
% [E, EDATA, EPRIOR] = GLMERR_WEIGHTED(NET, X, T, ESO_W) also returns the
% data and prior components of the total error.
%
% [E, EDATA, EPRIOR, Y, A] = GLMERR_WEIGHTED(NET, X, T, ESO_W) also
% returns a matrix Y giving the outputs of the model and a matrix A
% giving the summed inputs to each output unit, where each row
% corresponds to one pattern.
%
% See also
% GLM, GLMPAK, GLMUNPAK, GLMFWD, GLMGRAD, GLMTRAIN
%
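% Example
% A minimal usage sketch (the variables NIN, NOUT, X and T below are
% illustrative and not part of this file): build a softmax GLM and give
% every pattern the same weight.
%
%   net = glm(nin, nout, 'softmax');
%   eso_w = ones(size(x, 1), 1) / size(x, 1);  % one weight per pattern
%   [e, edata, eprior] = glmerr_weighted(net, x, t, eso_w);
%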

% Copyright (c) Ian T Nabney (1996-9)

% Check arguments for consistency
errstring = consist(net, 'glm', x, t);
if ~isempty(errstring)
  error(errstring);
end

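% Forward propagation: y holds the model outputs and a the summed inputs
% (pre-activations) to each output unit, one row per pattern.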
[y, a] = glmfwd(net, x);

%switch net.actfn
switch net.outfn

  case 'softmax'   % Softmax outputs

    nout = size(a, 2);
    % Ensure that sum(exp(a), 2) does not overflow
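    % (capping every element of a at log(realmax) - log(nout) keeps each
    % exp(a) at or below realmax/nout, so a row sum of nout terms cannot
    % exceed realmax)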
    maxcut = log(realmax) - log(nout);
    % Ensure that exp(a) > 0
    mincut = log(realmin);
    a = min(a, maxcut);
    a = max(a, mincut);
    temp = exp(a);
    y = temp./(sum(temp, 2)*ones(1, nout));
    % Ensure that log(y) is computable
    y(y < realmin) = realmin;
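    % Per-pattern log-likelihood of the targets under the softmax outputs,
    % combined into the data error by weighting each pattern with the
    % corresponding entry of eso_w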
    e_app = sum(t.*log(y), 2);
    edata = -eso_w'*e_app;

  otherwise
    error(['Unknown activation function ', net.outfn]);
end

% Combine the data error with any prior (regularisation) term defined on
% the network.
[e, edata, eprior] = errbayes(net, edata);