function [bnet, LL, engine] = learn_params_em(engine, evidence, max_iter, thresh)
% LEARN_PARAMS_EM Set the parameters of each adjustable node to their ML/MAP values using batch EM.
% [bnet, LLtrace, engine] = learn_params_em(engine, data, max_iter, thresh)
%
% data{i,l} is the value of node i in case l, or [] if hidden.
% Suppose you have L training cases in an O*L array, D, where O is the num observed
% scalar nodes, and N is the total num nodes.
% Then you can create 'data' as follows, where onodes is the index of the observable nodes:
%  data = cell(N, L);
%  data(onodes,:) = num2cell(D);
% Of course it is possible for different sets of nodes to be observed in each case.
%
% We return the modified bnet and engine.
% To see the learned parameters for node i, use the construct
%   s = struct(bnet.CPD{i}); % violate object privacy
% LLtrace is the learning curve: the vector of log-likelihood scores at each iteration.
%
% max_iter specifies the maximum number of iterations. Default: 10.
%
% thresh specifies the thresold for stopping EM. Default: 1e-3.
% We stop when |f(t) - f(t-1)| / avg < threshold,
% where avg = (|f(t)| + |f(t-1)|)/2 and f is log lik.

if nargin < 3, max_iter = 10; end
if nargin < 4, thresh = 1e-3; end

verbose = 1; % set to 0 to suppress the per-iteration progress line

previous_loglik = -inf; % -inf guarantees the first iteration is never "converged"
converged = 0;
num_iter = 1;
LL = [];

% Alternate E and M steps until the relative change in log-likelihood
% drops below thresh (as judged by em_converged) or max_iter is reached.
% Note: && (short-circuit) is the correct operator for a scalar condition.
while ~converged && (num_iter <= max_iter)
  [engine, loglik] = EM_step(engine, evidence);
  if verbose, fprintf('EM iteration %d, ll = %8.4f\n', num_iter, loglik); end
  num_iter = num_iter + 1;
  converged = em_converged(loglik, previous_loglik, thresh);
  previous_loglik = loglik;
  LL = [LL loglik]; % learning curve; max_iter is small so growth is cheap
end
if verbose, fprintf('\n'); end

% The engine now holds the maximized CPDs; extract the updated network.
bnet = bnet_from_engine(engine);

%%%%%%%%%

function [engine, loglik] = EM_step(engine, cases)
% EM_STEP One full EM iteration: E step accumulates expected sufficient
% statistics (ESS) for every adjustable CPD over all training cases, then
% the M step maximizes each CPD's parameters given its accumulated ESS.
% Returns the engine updated with the new parameters, and the total
% log-likelihood of all cases under the OLD parameters.

bnet = bnet_from_engine(engine); % engine contains the old params that are used for the E step
CPDs = bnet.CPD; % these are the new params that get maximized
num_CPDs = length(CPDs);
adjustable = zeros(1,num_CPDs);
for e=1:num_CPDs
  adjustable(e) = adjustable_CPD(CPDs{e});
end
adj = find(adjustable);
n = length(bnet.dag); % total number of nodes in the network

% Clear the expected sufficient statistics of every adjustable CPD
% before accumulating over this batch of cases.
for e=adj(:)'
  CPDs{e} = reset_ess(CPDs{e});
end

% E step: run inference on each case and fold the resulting family
% marginals into the ESS of the corresponding (equivalence-class) CPD.
loglik = 0;
ncases = size(cases, 2);
for l=1:ncases
  evidence = cases(:,l);
  [engine, ll] = enter_evidence(engine, evidence);
  loglik = loglik + ll;
  % Bit vector marking which nodes are hidden ([] entries) in this case.
  hidden_bitv = zeros(1,n);
  hidden_bitv(isemptycell(evidence)) = 1;
  for i=1:n
    e = bnet.equiv_class(i); % nodes may share (tie) parameters via equivalence classes
    if adjustable(e)
      fmarg = marginal_family(engine, i);
      CPDs{e} = update_ess(CPDs{e}, fmarg, evidence, bnet.node_sizes, bnet.cnodes, hidden_bitv);
    end
  end
end

% M step: set each adjustable CPD's parameters to their ML/MAP values
% given the ESS accumulated above.
for e=adj(:)'
  CPDs{e} = maximize_params(CPDs{e});
end

% Push the new parameters back into the inference engine.
engine = update_engine(engine, CPDs);