toolboxes/FullBNT-1.0.7/bnt/examples/static/fgraph/fg_mrf2.m @ 0:e9a9cd732c1e (tip)

author: wolffd
date:   Tue, 10 Feb 2015 15:05:51 +0000
commit: first hg version after svn
seed = 0;
rand('state', seed);
randn('state', seed);

nrows = 5;
ncols = 5;
npixels = nrows*ncols;

% we number pixels in transposed raster scan order (top to bottom, left to right)

% H(i,j) is the number of the hidden node at (i,j)
H = reshape(1:npixels, nrows, ncols);
% O(i,j) is the number of the observed node at (i,j)
O = reshape(1:npixels, nrows, ncols) + length(H(:));
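% With the 5x5 grid above, reshape numbers the nodes column by column, so
% H(1,1)=1, H(5,1)=5, H(1,2)=6, ..., H(5,5)=25 and O(i,j) = H(i,j) + 25
% (illustration of the numbering only, not new code).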


% Make a Bayes net where each hidden pixel generates an observed pixel
% but there are no connections between the hidden pixels.
% We use this just to generate noisy versions of known images.
N = 2*npixels;
dag = zeros(N);
for i=1:nrows
  for j=1:ncols
    dag(H(i,j), O(i,j)) = 1;
  end
end
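
% The same dag can be built without the double loop; this vectorized form is
% only an illustrative sketch (standard MATLAB indexing, not part of the
% original example) and reproduces the matrix exactly.
dag2 = zeros(N);
dag2(sub2ind([N N], H(:), O(:))) = 1;
assert(isequal(dag, dag2));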


K = 2; % number of discrete values for the hidden vars
ns = ones(N,1);
ns(H(:)) = K;
ns(O(:)) = 1;

% make image with vertical stripes
I = zeros(nrows, ncols);
for j=1:2:ncols
  I(:,j) = 1;
end
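% With ncols=5 this gives I = [1 0 1 0 1] in every row, i.e. bright stripes in
% columns 1, 3, 5 and dark stripes in columns 2 and 4 (illustration only).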

% each "hidden" node will be instantiated to the corresponding pixel of the known image
% each observed node has a conditional Gaussian distribution
eclass = ones(1,N);
%eclass(H(:)) = 1;
%eclass(O(:)) = 2;
eclass(H(:)) = 1:npixels;
eclass(O(:)) = npixels+1;
bnet = mk_bnet(dag, ns, 'discrete', H(:), 'equiv_class', eclass);


%bnet.CPD{1} = tabular_CPD(bnet, H(1), 'CPT', normalise(ones(1,K)));
for i=1:nrows
  for j=1:ncols
    bnet.CPD{H(i,j)} = root_CPD(bnet, H(i,j), I(i,j) + 1);
  end
end

% If H(i,j)=1, O(i,j)=+1 plus noise
% If H(i,j)=2, O(i,j)=-1 plus noise
sigma = 0.5; % observation noise variance (passed as the 'cov' argument below)
bnet.CPD{eclass(O(1,1))} = gaussian_CPD(bnet, O(1,1), 'mean', [1 -1], 'cov', reshape(sigma*ones(1,K), [1 1 K]));
ofactor = bnet.CPD{eclass(O(1,1))};
%ofactor = gaussian_CPD('self', 2, 'dps', 1, 'cps', [], 'sz', [K O], 'mean', [1 -1], 'cov', reshape(sigma*ones(1,K), [1 1 K)));


data = sample_bnet(bnet);
img = reshape(data(O(:)), nrows, ncols)
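
% Optionally visualize the noisy observations (an illustrative sketch assuming
% an interactive MATLAB session with graphics; sample_bnet returns a cell
% array, hence the cell2mat conversion).
figure; imagesc(cell2mat(img)); colormap(gray); colorbar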



%%%%%%%%%%%%%%%%%%%%%%%%%%%

% Now create an MRF represented as a factor graph to try and recover the scene

% VEF(i,j) is the number of the factor for the vertical edge between H(i,j) - H(i+1,j)
VEF = reshape((1:(nrows-1)*ncols), nrows-1, ncols);
% HEF(i,j) is the number of the factor for the horizontal edge between H(i,j) - H(i,j+1)
HEF = reshape((1:nrows*(ncols-1)), nrows, ncols-1) + length(VEF(:));

nvars = npixels;
nfac = length(VEF(:)) + length(HEF(:));

G = zeros(nvars, nfac);
N = length(ns);
eclass = zeros(1, nfac); % eclass(i)=j means factor i gets its params from factors{j}
vfactor_ndx = 1; % all vertical edges get their params from factors{1}
hfactor_ndx = 2; % all horizontal edges get their params from factors{2}
for i=1:nrows
  for j=1:ncols
    if i < nrows
      G(H(i:i+1,j), VEF(i,j)) = 1;
      eclass(VEF(i,j)) = vfactor_ndx;
    end
    if j < ncols
      G(H(i,j:j+1), HEF(i,j)) = 1;
      eclass(HEF(i,j)) = hfactor_ndx;
    end
  end
end
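
% Sanity check (illustrative addition, not part of the original example):
% every pairwise edge factor should connect exactly two hidden pixels.
assert(all(sum(G, 1) == 2));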


% "kitten raised in cage" prior - more likely to see contiguous vertical lines
vfactor = tabular_kernel([K K], softeye(K, 0.9));
hfactor = tabular_kernel([K K], softeye(K, 0.5));
factors = cell(1,2);
factors{vfactor_ndx} = vfactor;
factors{hfactor_ndx} = hfactor;
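% softeye(K, p) (from KPMtools) appears to put p on the diagonal and spread
% the remaining mass evenly off it, so with K=2 the vertical potential is
% roughly [0.9 0.1; 0.1 0.9] (strongly favours equal vertical neighbours)
% and the horizontal one is [0.5 0.5; 0.5 0.5] (no preference).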

ev_eclass = ones(1,N); % every observation factor gets its params from ofactor
ns = K*ones(1,nvars);
%fg = mk_fgraph_given_ev(G, ns, factors, {ofactor}, num2cell(img), 'equiv_class', eclass, 'ev_equiv_class', ev_eclass);
fg = mk_fgraph_given_ev(G, ns, factors, {ofactor}, img, 'equiv_class', eclass, 'ev_equiv_class', ev_eclass);

bnet2 = fgraph_to_bnet(fg);

% inference


maximize = 1;

engine = {};
engine{1} = belprop_fg_inf_engine(fg, 'max_iter', npixels*2);
engine{2} = jtree_inf_engine(bnet2);
nengines = length(engine);

% on fg, we have already included the evidence
evidence = cell(1,npixels);
tic; [engine{1}, ll(1)] = enter_evidence(engine{1}, evidence, 'maximize', maximize); toc


% on bnet2, we must add evidence to the dummy nodes
V = fg.nvars;
dummy = V+1:V+fg.nfactors;
N = max(dummy);
evidence = cell(1, N);
evidence(dummy) = {1};
tic; [engine{2}, ll(2)] = enter_evidence(engine{2}, evidence); toc


Ihat = zeros(nrows, ncols, nengines);
for e=1:nengines
  for i=1:nrows
    for j=1:ncols
      m = marginal_nodes(engine{e}, H(i,j));
      Ihat(i,j,e) = argmax(m.T)-1;
    end
  end
end
Ihat
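
% Compare each engine's MAP estimate with the true scene I (illustrative
% addition, not part of the original example).
for e=1:nengines
  nerrors = sum(sum(Ihat(:,:,e) ~= I));
  fprintf('engine %d: %d of %d pixels differ from the true image\n', e, nerrors, npixels);
end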