% Check that softmax works with a simple classification demo.
% Based on netlab's demglm2
% X -> Q where X is an input node, and Q is a softmax

rand('state', 0);
randn('state', 0);

% Check inference

input_dim = 2;
num_classes = 3;
IRLS_iter = 3;

net = glm(input_dim, num_classes, 'softmax');

dag = zeros(2);
dag(1,2) = 1;
discrete_nodes = [2];
bnet = mk_bnet(dag, [input_dim num_classes], 'discrete', discrete_nodes, 'observed', 1);

bnet.CPD{1} = root_CPD(bnet, 1);
clamped = 0;
bnet.CPD{2} = softmax_CPD(bnet, 2, net.w1, net.b1, clamped, IRLS_iter);

engine = jtree_inf_engine(bnet);

x = rand(1, input_dim);
q = glmfwd(net, x);

[engine, ll] = enter_evidence(engine, {x, []});
m = marginal_nodes(engine, 2);
assert(approxeq(m.T(:), q(:)));
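% As an extra sanity check, we can recompute the softmax output directly
% from the GLM parameters. This is a minimal sketch, not part of the
% original demo; it assumes Netlab's convention that glmfwd computes
% y = softmax(x*w1 + b1) row-wise.
a = x*net.w1 + net.b1;            % 1 x num_classes activations
q_manual = exp(a) ./ sum(exp(a)); % normalize to class posteriors
assert(approxeq(q_manual(:), q(:)));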
% Check learning
% We use EM, but in fact there is no hidden data.
% The M step will call IRLS on the softmax node.

% Generate data from three classes in 2d
input_dim = 2;
num_classes = 3;

% Fix seeds for reproducible results
randn('state', 42);
rand('state', 42);

ndata = 10;
% Generate mixture of three Gaussians in two dimensional space
data = randn(ndata, input_dim);
targets = zeros(ndata, num_classes);

% Priors for the clusters
prior(1) = 0.4;
prior(2) = 0.3;
prior(3) = 0.3;

% Cluster centres
c = [2.0, 2.0; 0.0, 0.0; 1.0, -1.0];

ndata1 = prior(1)*ndata;
ndata2 = (prior(1) + prior(2))*ndata;

% Put first cluster at (2, 2)
data(1:ndata1, 1) = data(1:ndata1, 1) * 0.5 + c(1,1);
data(1:ndata1, 2) = data(1:ndata1, 2) * 0.5 + c(1,2);
targets(1:ndata1, 1) = 1;

% Leave second cluster at (0, 0)
targets((ndata1+1):ndata2, 2) = 1;

% Put third cluster at (1, -1)
data((ndata2+1):ndata, 1) = data((ndata2+1):ndata, 1) * 0.6 + c(3,1);
data((ndata2+1):ndata, 2) = data((ndata2+1):ndata, 2) * 0.6 + c(3,2);
targets((ndata2+1):ndata, 3) = 1;


if 0 % debugging: train on the single case used in the inference check
  ndata = 1;
  data = x;
  targets = [1 0 0];
end

options = foptions;
options(1) = -1;         % display parameter: -1 = silent
options(14) = IRLS_iter; % max number of IRLS iterations
[net2, options2] = glmtrain(net, options, data, targets);
net2.ll = options2(8);   % final objective value; type 'help foptions' for details

cases = cell(2, ndata);
for l=1:ndata
  q = find(targets(l,:)==1);
  x = data(l,:);
  cases{1,l} = x(:);
  cases{2,l} = q;
end

max_iter = 2; % we have complete observability, so 1 iter is enough
[bnet2, ll2] = learn_params_em(engine, cases, max_iter);

w = get_field(bnet2.CPD{2}, 'weights');
b = get_field(bnet2.CPD{2}, 'offset')';

% Compare the parameters learned by EM/IRLS with those from glmtrain
w
net2.w1

b
net2.b1

% assert(approxeq(net2.ll, ll2)); % glmtrain returns ll after final M step, learn_params before
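
% Optional follow-up (not in the original demo): compare the class
% posteriors produced by the two fitted models on the training data.
% This sketch assumes the BNT 'weights' and 'offset' fields use the same
% orientation as Netlab's glm (inputs-by-classes weights, row offset),
% which the printouts above suggest.
pred_netlab = glmfwd(net2, data);  % ndata x num_classes posteriors
a2 = data*w + ones(ndata,1)*b;     % activations under the BNT parameters
pred_bnt = exp(a2) ./ (sum(exp(a2),2) * ones(1,num_classes));
% After IRLS both fits should be close, but the stopping rules differ,
% so only check loosely (left commented out, like the assert above):
% assert(approxeq(pred_netlab, pred_bnt, 1e-1));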