% Fit a piece-wise linear regression model (a mixture of two linear experts).
% Here is the model
%
%  X \
%  |  |
%  Q  |
%  |  /
%  Y
%
% where all arcs point down.
% We condition everything on X, so X is a root node. Q is a softmax (the gate),
% and Y is a linear Gaussian (the experts).
% Q is hidden, X and Y are observed.

X = 1;
Q = 2;
Y = 3;
dag = zeros(3,3);
dag(X,[Q Y]) = 1;
dag(Q,Y) = 1;
ns = [1 2 1]; % make X and Y scalars, and have 2 experts
dnodes = [2];
onodes = [1 3];
bnet = mk_bnet(dag, ns, 'discrete', dnodes, 'observed', onodes);

IRLS_iter = 10;
clamped = 0;

bnet.CPD{1} = root_CPD(bnet, 1);

% start with good initial params
w = [-5 5]; % w(:,i) is the normal vector to the i'th decision boundary
b = [0 0]; % b(i) is the offset (bias) to the i'th decision boundary

mu = [0 0];
sigma = 1;
Sigma = repmat(sigma*eye(ns(Y)), [ns(Y) ns(Y) ns(Q)]);
W = [-1 1];
W2 = reshape(W, [ns(Y) ns(X) ns(Q)]);

bnet.CPD{2} = softmax_CPD(bnet, 2, w, b, clamped, IRLS_iter);
bnet.CPD{3} = gaussian_CPD(bnet, 3, 'mean', mu, 'cov', Sigma, 'weights', W2);

engine = jtree_inf_engine(bnet);

evidence = cell(1,3);
evidence{X} = 0.68;

engine = enter_evidence(engine, evidence);

m = marginal_nodes(engine, Y);
m.mu % posterior mean E[Y | X = 0.68]; with this gate, expert 2 dominates for x > 0, so this should be close to +0.68
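
% Not in the original script: one might also inspect the gate's posterior.
% In BNT, marginal_nodes on a discrete node returns a struct whose field T
% holds the distribution; with w = [-5 5], it should put nearly all mass on
% expert 2 at x = 0.68.
mq = marginal_nodes(engine, Q);
mq.T % p(Q | X = 0.68)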
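
% A minimal sketch, not part of the original demo: sweep x over a grid and
% plot the posterior mean of Y at each point to visualize the piece-wise
% linear fit (roughly y = -x for x < 0 and y = +x for x > 0, with a soft
% transition at the decision boundary). This reuses the BNT calls from above
% (enter_evidence, marginal_nodes); the names xs, ys, engine2, m2 and the
% grid range [-2, 2] are our own arbitrary choices.
xs = linspace(-2, 2, 41);
ys = zeros(size(xs));
for i = 1:length(xs)
  ev = cell(1,3);
  ev{X} = xs(i);
  engine2 = enter_evidence(engine, ev);
  m2 = marginal_nodes(engine2, Y);
  ys(i) = m2.mu; % moment-matched posterior mean E[Y | X = xs(i)]
end
plot(xs, ys); xlabel('x'); ylabel('E[Y | X=x]');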