toolboxes/FullBNT-1.0.7/bnt/learning/dirichlet_score_family.m @ 0:e9a9cd732c1e
first hg version after svn
author wolffd
date Tue, 10 Feb 2015 15:05:51 +0000
function LL = dirichlet_score_family(counts, prior)
% DIRICHLET_SCORE_FAMILY Compute the log marginal likelihood of a single family
% LL = dirichlet_score_family(counts, prior)
%
% counts(a, b, ..., z) is the number of times parent 1 = a, parent 2 = b, ..., child = z
% prior is an optional multidimensional array of the same shape as counts.
% It defaults to a uniform prior.
%
% We marginalize out the parameters:
% LL = log \int \prod_m P(x(i,m) | x(Pa_i,m), theta_i) P(theta_i) d(theta_i)
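%
% Example (a sketch; assumes the BNT helpers mysize, normalise and myones are on the path):
%   counts = [10 2; 4 8; 1 5];           % one parent with 3 states, child with 2 states
%   LL = dirichlet_score_family(counts); % uses the default uniform prior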

% LL = log[ prod_j gamma(alpha_ij)/gamma(alpha_ij + N_ij) *
%           prod_k gamma(alpha_ijk + N_ijk)/gamma(alpha_ijk) ]
% Call the prod_k term U and the prod_j term V.
% We reshape all quantities into (j,k) matrices.
% This formula was first derived by Cooper and Herskovits, 1992.
% See also "Learning Bayesian Networks", Heckerman, Geiger and Chickering, MLJ 95.

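% In the code below, row j indexes a joint configuration of the parents and
% column k indexes a child state, so alpha_ij = sum_k alpha_ijk and N_ij = sum_k N_ijk.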
ns = mysize(counts);   % [size of parent 1, ..., size of parent n, size of child]
ns_ps = ns(1:end-1);   % parent sizes
ns_self = ns(end);     % number of child states

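% The default prior is uniform with a total pseudo-count of 1,
% i.e. alpha_ijk = 1/prod(ns) for every configuration.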
if nargin < 2, prior = normalise(myones(ns)); end

if 1
  % Exact closed form (this branch is always taken).
  prior = reshape(prior(:), [prod(ns_ps) ns_self]);
  counts = reshape(counts, [prod(ns_ps) ns_self]);
  %U = prod(gamma(prior + counts) ./ gamma(prior), 2); % mult over k
  LU = sum(gammaln(prior + counts) - gammaln(prior), 2);
  alpha_ij = sum(prior, 2); % sum over k
  N_ij = sum(counts, 2);
  %V = gamma(alpha_ij) ./ gamma(alpha_ij + N_ij);
  LV = gammaln(alpha_ij) - gammaln(alpha_ij + N_ij);
  %L = prod(U .* V);
  LL = sum(LU + LV);
else
  % Unused approximation: plug-in log likelihood at the posterior mean parameters.
  CPT = mk_stochastic(prior + counts);
  LL = sum(counts(:) .* log(CPT(:)));
end
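
% Sanity check (a sketch, not part of the toolbox): for a family with no parents
% the score reduces to the Beta-Bernoulli marginal likelihood, which can be
% computed directly with betaln:
%
%   N     = [7 3];       % 7 observations of state 1, 3 of state 2
%   alpha = [0.5 0.5];   % symmetric Dirichlet(1/2, 1/2) prior
%   LL1 = dirichlet_score_family(N, alpha);
%   LL2 = betaln(alpha(1)+N(1), alpha(2)+N(2)) - betaln(alpha(1), alpha(2));
%   % LL1 and LL2 agree up to floating-point rounding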