function [strategy, MEU, niter] = solve_limid(engine, varargin)
% SOLVE_LIMID Find the (locally) optimal strategy for a LIMID
% [strategy, MEU, niter] = solve_limid(inf_engine, ...)
%
% strategy{d} = stochastic policy for node d (a decision node)
% MEU = maximum expected utility
% niter = number of iterations used
%
% The following optional arguments can be specified in the form of name/value pairs:
% [default in brackets]
%
% max_iter - max. num. iterations [ 1 ]
% tol - tolerance on consecutive MEU values, used to assess convergence [ 1e-3 ]
% order - order in which decision nodes are optimized [ reverse numerical order ]
%
% e.g., solve_limid(engine, 'tol', 1e-2, 'max_iter', 10)

bnet = bnet_from_engine(engine);

% default values
max_iter = 1;
tol = 1e-3;
D = bnet.decision_nodes;
order = D(end:-1:1);

% parse optional name/value arguments
args = varargin;
nargs = length(args);
for i=1:2:nargs
  switch args{i}
    case 'max_iter', max_iter = args{i+1};
    case 'tol',      tol = args{i+1};
    case 'order',    order = args{i+1};
    otherwise
      error(['invalid argument name ' args{i}]);
  end
end

CPDs = bnet.CPD;
ns = bnet.node_sizes;
N = length(ns);
evidence = cell(1,N);
strategy = cell(1,N);

% Single policy updating: repeatedly optimize each decision node's policy in
% turn, holding the other policies fixed, until the MEU converges or max_iter
% is reached.
iter = 1;
converged = 0;
oldMEU = 0;
while ~converged && (iter <= max_iter)
  for d=order(:)'
    % compute the marginal on d's family, ignoring d's current policy
    engine = enter_evidence(engine, evidence, 'exclude', d);
    [m, pot] = marginal_family(engine, d);
    %pot = marginal_family_pot(engine, d);
    [policy, score] = upot_to_opt_policy(pot);
    e = bnet.equiv_class(d);
    CPDs{e} = set_fields(CPDs{e}, 'policy', policy);
    engine = update_engine(engine, CPDs);
    strategy{d} = policy;
  end
  % evaluate the expected utility of the current strategy
  engine = enter_evidence(engine, evidence);
  [m, pot] = marginal_nodes(engine, []);
  %pot = marginal_family_pot(engine, []);
  [dummy, MEU] = upot_to_opt_policy(pot);
  if approxeq(MEU, oldMEU, tol)
    converged = 1;
  end
  oldMEU = MEU;
  iter = iter + 1;
end
niter = iter - 1;
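
% --- Example usage (a minimal sketch, not part of the original file) ---
% This assumes a LIMID has already been defined as a bnet with chance,
% decision and utility nodes, and that a LIMID inference engine has been
% constructed for it (e.g. BNT's jtree_limid_inf_engine); the variable
% names below are illustrative only:
%
%   engine = jtree_limid_inf_engine(bnet);
%   [strategy, MEU, niter] = solve_limid(engine, 'max_iter', 10, 'tol', 1e-2);
%   strategy{d}   % stochastic policy for decision node d, indexed by its parents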