% Fit a piecewise-linear regression model (a mixture of experts).
% Here is the model
%
%  X \
%  | |
%  Q |
%  | /
%  Y
%
% where all arcs point down.
% We condition everything on X, so X is a root node. Q is a softmax
% (the gating node), and Y is a linear Gaussian (the experts).
% Q is hidden; X and Y are observed.

X = 1;
Q = 2;
Y = 3;
dag = zeros(3,3);
dag(X,[Q Y]) = 1;
dag(Q,Y) = 1;
ns = [1 2 1]; % make X and Y scalars, and have 2 experts
dnodes = [2];
onodes = [1 3];
bnet = mk_bnet(dag, ns, 'discrete', dnodes, 'observed', onodes);

IRLS_iter = 10;
clamped = 0;

bnet.CPD{1} = root_CPD(bnet, 1);

if 0
  % start with good initial params
  w = [-5 5]; % w(:,i) is the normal vector to the i'th decision boundary
  b = [0 0];  % b(i) is the offset (bias) of the i'th decision boundary

  mu = [0 0];
  sigma = 1;
  Sigma = repmat(sigma*eye(ns(Y)), [ns(Y) ns(Y) ns(Q)]);
  W = [-1 1];
  W2 = reshape(W, [ns(Y) ns(X) ns(Q)]);

  bnet.CPD{2} = softmax_CPD(bnet, 2, w, b, clamped, IRLS_iter);
  bnet.CPD{3} = gaussian_CPD(bnet, 3, mu, Sigma, W2);
else
  % start with random initial params
  rand('state', 0);
  randn('state', 0);
  bnet.CPD{2} = softmax_CPD(bnet, 2, 'clamped', clamped, 'max_iter', IRLS_iter);
  bnet.CPD{3} = gaussian_CPD(bnet, 3);
end

load('/examples/static/Misc/mixexp_data.txt', '-ascii');
% Just use 1/10th of the data, to speed things up
data = mixexp_data(1:10:end, :);
%data = mixexp_data;

%plot(data(:,1), data(:,2), '.')

s = struct(bnet.CPD{2}); % violate object privacy
eta0 = [s.glim{1}.b1; s.glim{1}.w1]';
s = struct(bnet.CPD{3}); % violate object privacy
W = reshape(s.weights, [1 2]);
theta0 = [s.mean; W]';

%figure(1)
%mixexp_plot(theta0, eta0, data);
%suptitle('before learning')

ncases = size(data, 1);
cases = cell(3, ncases);
cases([1 3], :) = num2cell(data'); % leave row Q empty, since Q is hidden

engine = jtree_inf_engine(bnet);

% log lik before learning
ll = 0;
for l=1:ncases
  ev = cases(:,l);
  [engine, loglik] = enter_evidence(engine, ev);
  ll = ll + loglik;
end

% do learning
max_iter = 5;
[bnet2, LL2] = learn_params_em(engine, cases, max_iter);

s = struct(bnet2.CPD{2}); % violate object privacy
eta2 = [s.glim{1}.b1; s.glim{1}.w1]';
s = struct(bnet2.CPD{3});
W = reshape(s.weights, [1 2]);
theta2 = [s.mean; W]';

%figure(2)
%mixexp_plot(theta2, eta2, data);
%suptitle('after learning')

fprintf('mixexp2: loglik before learning %f, after %d iters %f\n', ll, length(LL2), LL2(end));
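
% --- Optional prediction sketch (not part of the original script) ---
% A minimal sketch of how one might form predictions from the learned
% mixture of experts. It assumes the same private fields (glim, mean,
% weights) that are already read via struct() above, and the test input
% x below is an arbitrary, made-up value. For a scalar input x,
%   E[Y | X=x] = sum_q P(Q=q | x) * (mu(q) + W(q)*x),
% where P(Q|x) is the softmax gating distribution.
sQ = struct(bnet2.CPD{2}); % violates object privacy, as above
sY = struct(bnet2.CPD{3});
x = 0.5; % hypothetical test input
logits = x * sQ.glim{1}.w1 + sQ.glim{1}.b1; % 1x2 softmax activations
p = exp(logits - max(logits));
p = p / sum(p); % gating probabilities P(Q|x)
mu = reshape(sY.mean, [1 2]);    % expert intercepts
W = reshape(sY.weights, [1 2]);  % expert slopes
ypred = sum(p .* (mu + W*x));
fprintf('predicted E[Y|X=%g] = %f\n', x, ypred);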