% toolboxes/FullBNT-1.0.7/HMM/dhmm_em_demo.m

O = 3;  % number of discrete observation symbols
Q = 2;  % number of hidden states

% "true" parameters
prior0 = normalise(rand(Q,1));
transmat0 = mk_stochastic(rand(Q,Q));
obsmat0 = mk_stochastic(rand(Q,O));

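% Both helpers return properly normalised distributions: normalise scales a
% vector so it sums to 1, and mk_stochastic makes each row of a matrix sum to 1.
% A quick sanity check (not part of the original demo):
assert(abs(sum(prior0) - 1) < 1e-10);
assert(all(abs(sum(transmat0, 2) - 1) < 1e-10));
assert(all(abs(sum(obsmat0, 2) - 1) < 1e-10));
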
% training data
T = 5;    % length of each observation sequence
nex = 10; % number of training sequences
data = dhmm_sample(prior0, transmat0, obsmat0, T, nex);

% initial guess of parameters
prior1 = normalise(rand(Q,1));
transmat1 = mk_stochastic(rand(Q,Q));
obsmat1 = mk_stochastic(rand(Q,O));

% improve guess of parameters using EM
[LL, prior2, transmat2, obsmat2] = dhmm_em(data, prior1, transmat1, obsmat1, 'max_iter', 5);
LL  % log likelihood of the training data after each EM iteration

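% As an optional check (not part of the original demo), the learned model can be
% compared against the "true" generating parameters on the same data; since EM
% fits the training set, the learned model often scores at least as high here.
true_loglik = dhmm_logprob(data, prior0, transmat0, obsmat0)
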
% use the trained model to compute the log likelihood of the training data
loglik = dhmm_logprob(data, prior2, transmat2, obsmat2)
% loglik differs slightly from LL(end), since it is computed after the final M step
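
% A minimal decoding sketch (not part of the original demo): the HMM toolbox
% routines multinomial_prob and viterbi_path give the per-state observation
% likelihoods and the most probable hidden state path. This assumes each row of
% data holds one observation sequence.
obslik = multinomial_prob(data(1,:), obsmat2);   % obslik(i,t) = Pr(obs(t) | state i)
vpath = viterbi_path(prior2, transmat2, obslik)  % most likely state sequence for sequence 1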