% toolboxes/FullBNT-1.0.7/bnt/examples/dynamic/reveal1.m
%
% Make a DBN with the following inter-connectivity matrix
%    1
%   / \
%  2   3
%   \ /
%    4
%    |
%    5
% where all arcs point down. In addition, there are persistence arcs from each node to itself.
% There are no intra-slice connections.
% Nodes have noisy-or CPDs.
% Node 1 turns on spontaneously due to its leaky source.
% This effect trickles down to the other nodes in the order shown.
% All the other nodes inhibit their leaks.
% None of the nodes inhibit the connection from themselves, so that once they are on, they remain
% on (persistence).
%
% This model was used in the experiments reported in
% - "Learning the structure of DBNs", Friedman, Murphy and Russell, UAI 1998.
% where the structure was learned even in the presence of missing data.
% In that paper, we used the structural EM algorithm.
% Here, we assume full observability and tabular CPDs for the learner, so we can use a much
% simpler learning algorithm.

ss = 5;

inter = eye(ss);
inter(1,[2 3]) = 1;
inter(2,4) = 1;
inter(3,4) = 1;
inter(4,5) = 1;
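
% In mk_dbn's convention, inter(i,j)=1 means an arc from node i in slice t to node j in slice t+1,
% so eye(ss) supplies the persistence (self) arcs described in the header.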

intra = zeros(ss);
ns = 2*ones(1,ss);

bnet = mk_dbn(intra, inter, ns);

% All nodes start out off
for i=1:ss
  bnet.CPD{i} = tabular_CPD(bnet, i, [1.0 0.0]');
end
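
% (States are 1=off and 2=on, so the prior [1.0 0.0]' puts all mass on "off".)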

% The following params correspond to Fig 4a in the UAI 98 paper.
% The first arg (after bnet and the node index) is the leak inhibition prob.
% The vector contains the inhibition probs from the parents in the previous slice;
% the last element is self, which is never inhibited.
bnet.CPD{1+ss} = noisyor_CPD(bnet, 1+ss, 0.8, 0);
bnet.CPD{2+ss} = noisyor_CPD(bnet, 2+ss, 1, [0.9 0]);
bnet.CPD{3+ss} = noisyor_CPD(bnet, 3+ss, 1, [0.8 0]);
bnet.CPD{4+ss} = noisyor_CPD(bnet, 4+ss, 1, [0.7 0.6 0]);
bnet.CPD{5+ss} = noisyor_CPD(bnet, 5+ss, 1, [0.5 0]);
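
% Worked example of the noisy-or semantics used above (standard noisy-or, spelled
% out here for clarity): a node stays off iff its leak and every active parent are
% inhibited, i.e. P(off | parents) = q_leak * prod over "on" parents of q_i.
% E.g. for node 4 in slice 2 (parents 2, 3 and itself from the previous slice):
%   P(4 off | 2 on, 3 on, 4 previously off) = 1 * 0.7 * 0.6 = 0.42
% and since the self inhibition prob is 0, P(4 off | 4 previously on) = 0,
% which gives the persistence behaviour described in the header.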


% Generate some training data

nseqs = 20;
seqs = cell(1,nseqs);
T = 30;
for i=1:nseqs
  seqs{i} = sample_dbn(bnet, T);
end
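
% Each seqs{i} is an ss-by-T cell array of sampled states (1=off, 2=on).
% If desired, a single sequence can be flattened to a numeric matrix for inspection,
% e.g. with the cell2num utility that ships with FullBNT (not needed for learning):
%   ev1 = cell2num(seqs{1});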

max_fan_in = 3; % let's cheat a little here: 3 is the true max fan-in (node 4 has 3 parents in the previous slice)

% compute the number of incorrect edges as a function of the training set size
%sz = [5 10 15 20];
sz = [5 10];
h = zeros(1, length(sz));
for i=1:length(sz)
  inter2 = learn_struct_dbn_reveal(seqs(1:sz(i)), ns, max_fan_in);
  h(i) = sum(abs(inter(:)-inter2(:))); % Hamming distance to the true structure
end
h
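
% Optional visualization (not part of the original experiment): plot the number of
% incorrect edges against the number of training sequences.
plot(sz, h, '-o');
xlabel('number of training sequences');
ylabel('number of incorrect edges (Hamming distance)');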