_FullBNT/BNT/CPDs/@gmux_CPD/CPD_to_lambda_msg.m @ 8:b5b38998ef3b
function lam_msg = CPD_to_lambda_msg(CPD, msg_type, n, ps, msg, p, evidence)
% CPD_TO_LAMBDA_MSG Compute lambda message (gmux)
% lam_msg = CPD_to_lambda_msg(CPD, msg_type, n, ps, msg, p, evidence)
% Pearl p183 eq 4.52

% Let Y be this node, X1..Xn be the cts parents and M the discrete switch node.
% e.g., for n=3, M=1
%
% X1 X2 X3 M
%  \
%   \
%    Y
%
% So the only case in which we send an informative message is if p=1=M.
% To the other cts parents, we send the "know nothing" message.

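% In information (canonical) form, the message computed below for the selected
% parent (with Y | X, M=m distributed as N(B*x + mu, Sigma), where
% B = CPD.weights(:,:,m), mu = CPD.mean(:,m), Sigma = CPD.cov(:,:,m)) is
%   lam_msg.precision  = B' * inv(Sigma_lambda + Sigma) * B
%   lam_msg.info_state = B' * inv(Sigma_lambda + Sigma) * (mu_lambda - mu)
% where (mu_lambda, Sigma_lambda) are the mean and covariance of Y's own lambda value.
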
switch msg_type
 case 'd',
  error('gaussian_CPD can''t create discrete msgs')
 case 'g',
  cps = ps(CPD.cps);
  cpsizes = CPD.sizes(CPD.cps);
  self_size = CPD.sizes(end);
  i = find_equiv_posns(p, cps); % p is n's i'th cts parent
  psz = cpsizes(i);
  dps = ps(CPD.dps);
  M = evidence{dps};
  if isempty(M)
    error('gmux node must have observed discrete parent')
  end
  P = msg{n}.lambda.precision;
  if all(P == 0) | (cps(M) ~= p) % if we know nothing, or are sending to a disconnected parent
    lam_msg.precision = zeros(psz, psz);
    lam_msg.info_state = zeros(psz, 1);
    return;
  end
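  % The zero-precision, zero-info_state message returned above is the vacuous
  % ("know nothing") lambda message in information form; it leaves the
  % receiving parent's belief unchanged.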
  % We are sending a message to the only effectively connected parent.
  % There are no other incoming pi messages.
  Bmu = CPD.mean(:,M);
  BSigma = CPD.cov(:,:,M);
  Bi = CPD.weights(:,:,M);
  if (det(P) > 0) | isinf(P)
    if isinf(P) % Y is observed
      Sigma_lambda = zeros(self_size, self_size); % infinite precision => 0 variance
      mu_lambda = msg{n}.lambda.mu; % observed_value;
    else
      Sigma_lambda = inv(P);
      mu_lambda = Sigma_lambda * msg{n}.lambda.info_state;
    end
    C = inv(Sigma_lambda + BSigma);
    lam_msg.precision = Bi' * C * Bi;
    lam_msg.info_state = Bi' * C * (mu_lambda - Bmu);
  else
    % method that uses matrix inversion lemma
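    % With Sigma_lambda = inv(P), the matrix inversion lemma gives
    %   C = inv(Sigma_lambda + BSigma) = P - P*inv(P + inv(BSigma))*P = P - P*A*P,
    % so P itself never has to be inverted (this branch is taken when det(P) <= 0).
    % Similarly, with z = P*mu_lambda and D = I - P*A,
    %   C*(mu_lambda - Bmu) = D*z - D*P*Bmu.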
    A = inv(P + inv(BSigma));
    C = P - P*A*P;
    lam_msg.precision = Bi' * C * Bi;
    D = eye(self_size) - P*A;
    z = msg{n}.lambda.info_state;
    lam_msg.info_state = Bi' * (D*z - D*P*Bmu);
  end
end
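
The two branches compute the same message in different ways. The following standalone check is a minimal sketch (not part of BNT; the sizes and random matrices are purely illustrative) that verifies the matrix-inversion-lemma branch against the direct moment-form branch for an invertible precision P:

% Check that the matrix-inversion-lemma branch agrees with the direct branch.
self_size = 3; psz = 2;
R = randn(self_size); P = R*R' + eye(self_size);        % lambda precision of Y (SPD)
S = randn(self_size); BSigma = S*S' + eye(self_size);   % CPD covariance for the selected m
Bi  = randn(self_size, psz);                            % CPD regression weights
Bmu = randn(self_size, 1);                              % CPD mean
z   = randn(self_size, 1);                              % lambda info_state of Y (= P*mu_lambda)

% Direct branch: moment form
Sigma_lambda = inv(P); mu_lambda = Sigma_lambda * z;
C1 = inv(Sigma_lambda + BSigma);
prec1 = Bi' * C1 * Bi;
info1 = Bi' * C1 * (mu_lambda - Bmu);

% Matrix-inversion-lemma branch: avoids inv(P)
A = inv(P + inv(BSigma));
C2 = P - P*A*P;
D = eye(self_size) - P*A;
prec2 = Bi' * C2 * Bi;
info2 = Bi' * (D*z - D*P*Bmu);

disp(norm(prec1 - prec2));  % should be ~0
disp(norm(info1 - info2));  % should be ~0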