Mercurial > hg > camir-aes2014
comparison toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/Old/kalman1.m @ 0:e9a9cd732c1e tip
first hg version after svn
author: wolffd
date: Tue, 10 Feb 2015 15:05:51 +0000
parents: (none)
children: (none)
comparison legend: equal / deleted / inserted / replaced
comparing revision -1:000000000000 (empty base) with revision 0:e9a9cd732c1e
% Build a linear dynamical system (LDS) as a 2-node-per-slice DBN:
%   X1 -> X2      hidden-state chain
%   |     |
%   v     v
%   Y1    Y2      per-slice observations

% Within-slice arc X -> Y; between-slice arc X(t) -> X(t+1).
intra = [0 1;
         0 0];
inter = [1 0;
         0 0];
n = 2;                          % nodes per slice

X = 2; % size of hidden state
Y = 2; % size of observable state

ns = [X Y];                     % node sizes
dnodes = [];                    % no discrete nodes: everything is Gaussian
onodes = [2];                   % the Y node is observed
eclass1 = [1 2];                % slice 1: {prior on X1, obs model}
eclass2 = [3 2];                % slice 2+: {transition model, obs model tied to slice 1}
bnet = mk_dbn(intra, inter, ns, 'discrete', dnodes, ...
              'eclass1', eclass1, 'eclass2', eclass2, 'observed', onodes);
23 | |
% Random initial parameter guesses; eye() keeps the covariances SPD.
x0 = rand(X,1);                 % initial-state mean
V0 = eye(X);                    % initial-state covariance
C0 = rand(Y,X);                 % observation matrix
R0 = eye(Y);                    % observation noise covariance
A0 = rand(X,X);                 % state transition matrix
Q0 = eye(X);                    % process noise covariance

% Eclass 1 = prior on X1, eclass 2 = shared observation model,
% eclass 3 = shared transition model. The means of 2 and 3 are clamped
% at zero, and the covariance priors are disabled ('cov_prior_weight', 0)
% so that EM here matches the classical Kalman-filter EM updates.
bnet.CPD{1} = gaussian_CPD(bnet, 1, 'mean', x0, 'cov', V0, ...
                           'cov_prior_weight', 0);
bnet.CPD{2} = gaussian_CPD(bnet, 2, 'mean', zeros(Y,1), 'cov', R0, ...
                           'weights', C0, 'clamp_mean', 1, 'cov_prior_weight', 0);
bnet.CPD{3} = gaussian_CPD(bnet, 3, 'mean', zeros(X,1), 'cov', Q0, ...
                           'weights', A0, 'clamp_mean', 1, 'cov_prior_weight', 0);
36 | |
37 | |
T = 5; % fixed length sequences

% Three inference engines that should produce identical answers on this
% model: exact Kalman filtering, a jtree on the unrolled network, and the
% interface-algorithm jtree for DBNs.
clear engine;
engine{1} = kalman_inf_engine(bnet);
engine{2} = jtree_unrolled_dbn_inf_engine(bnet, T);
engine{3} = jtree_dbn_inf_engine(bnet);
N = length(engine);
45 | |
% --- inference ---

% Sample one full trajectory, then keep only the observed (Y) rows as
% evidence; the hidden rows stay empty cells.
ev = sample_dbn(bnet, T);
evidence = cell(n, T);
evidence(onodes, :) = ev(onodes, :);

t = 1;
query = [1 3];                  % joint marginal on X(t) and X(t+1)
m = cell(1, N);
ll = zeros(1, N);
for i = 1:N
  [engine{i}, ll(i)] = enter_evidence(engine{i}, evidence);
  m{i} = marginal_nodes(engine{i}, query, t);
end

% Every engine must agree with the reference engine{1} (Kalman).
for i = 2:N
  assert(approxeq(m{1}.mu, m{i}.mu));
  assert(approxeq(m{1}.Sigma, m{i}.Sigma));
  assert(approxeq(ll(1), ll(i)));
end

% Disabled debug variant: prints the approxeq results instead of
% asserting. Flip the 0 to 1 when diagnosing a mismatch.
if 0
  for i = 2:N
    approxeq(m{1}.mu, m{i}.mu)
    approxeq(m{1}.Sigma, m{i}.Sigma)
    approxeq(ll(1), ll(i))
  end
end
75 | |
% --- learning ---

% Generate training sequences; as above, only the observed rows are kept.
ncases = 5;
cases = cell(1, ncases);
for i = 1:ncases
  ev = sample_dbn(bnet, T);
  cases{i} = cell(n, T);
  cases{i}(onodes, :) = ev(onodes, :);
end

% Run a few EM iterations with each engine on the same data.
max_iter = 2;
bnet2 = cell(1, N);
LLtrace = cell(1, N);
for i = 1:N
  [bnet2{i}, LLtrace{i}] = learn_params_dbn_em(engine{i}, cases, 'max_iter', max_iter);
end

% Unpack the learned CPDs: CPD{i,e} = engine i, equivalence class e.
% struct() exposes the CPD object's fields for direct comparison.
for i = 1:N
  temp = bnet2{i};
  for e = 1:3
    CPD{i,e} = struct(temp.CPD{e});
  end
end

% All engines must learn identical parameters and likelihood traces.
for i = 2:N
  assert(approxeq(LLtrace{i}, LLtrace{1}));
  for e = 1:3
    assert(approxeq(CPD{i,e}.mean, CPD{1,e}.mean));
    assert(approxeq(CPD{i,e}.cov, CPD{1,e}.cov));
    assert(approxeq(CPD{i,e}.weights, CPD{1,e}.weights));
  end
end
108 | |
% --- compare against the standalone Kalman filter toolbox ---

% Pack the observations into the Y x T x ncases array learn_kalman expects.
data = zeros(Y, T, ncases);
for i = 1:ncases
  data(:,:,i) = cell2num(cases{i}(onodes, :));
end
[A2, C2, Q2, R2, x2, V2, LL2trace] = ...
    learn_kalman(data, A0, C0, Q0, R0, x0, V0, max_iter);

% EM via BNT engine 1 should reproduce the KF-toolbox EM result exactly:
% CPD{eng,1} is the initial-state prior, CPD{eng,2} the observation model,
% CPD{eng,3} the transition model. (NOTE(review): the first CPD index is
% the *engine*, not the eclass — easy to misread.)
eng = 1;
assert(approxeq(x2, CPD{eng,1}.mean))
assert(approxeq(V2, CPD{eng,1}.cov))
assert(approxeq(C2, CPD{eng,2}.weights))
assert(approxeq(R2, CPD{eng,2}.cov));
assert(approxeq(A2, CPD{eng,3}.weights))
assert(approxeq(Q2, CPD{eng,3}.cov));
assert(approxeq(LL2trace, LLtrace{1}))
127 |