% Demo: learn a discrete-observation HMM with EM (Baum-Welch).
% Samples training sequences from a randomly generated "true" model,
% then fits a fresh random model to that data and scores it.

O = 3;  % size of the discrete observation alphabet
Q = 2;  % number of hidden states

% "true" parameters used to generate the data
prior0 = normalise(rand(Q,1));
transmat0 = mk_stochastic(rand(Q,Q));
obsmat0 = mk_stochastic(rand(Q,O));

% training data: nex sequences, each of length T
T = 5;
nex = 10;
data = dhmm_sample(prior0, transmat0, obsmat0, T, nex);

% random initial guess of the parameters
prior1 = normalise(rand(Q,1));
transmat1 = mk_stochastic(rand(Q,Q));
obsmat1 = mk_stochastic(rand(Q,O));

% improve the guess with EM; LL holds the per-iteration log likelihoods
[LL, prior2, transmat2, obsmat2] = dhmm_em(data, prior1, transmat1, obsmat1, 'max_iter', 5);
LL

% use the fitted model to compute the log likelihood of the data
loglik = dhmm_logprob(data, prior2, transmat2, obsmat2)
% log lik is slightly different than LL(end), since it is computed after the final M step