function CPD = maximize_params(CPD, temp)
% MAXIMIZE_PARAMS Set the params of a CPD to their ML/MAP values (Gaussian)
% CPD = maximize_params(CPD, temperature)
%
% Sets CPD.mean, CPD.weights and CPD.cov from the expected sufficient
% statistics accumulated in CPD.WXsum, CPD.WYsum, CPD.WXXsum, CPD.WXYsum,
% CPD.WYYsum and CPD.Wsum (the E-step quantities), respecting the
% clamped_mean / clamped_weights / clamped_cov / tied_cov flags.
% Temperature is currently only used for the entropic prior on Sigma.

% For details, see "Fitting a Conditional Gaussian Distribution", Kevin Murphy, tech. report,
% 1998, available at www.cs.berkeley.edu/~murphyk/papers.html
% Referring to table 2, we use equations 1/2 to estimate the covariance matrix in the untied/tied case,
% and equation 9 to estimate the weight matrix and mean.
% We do not implement spherical Gaussians - the code is already pretty complicated!

% Nothing to do if every parameter of this CPD is clamped.
if ~adjustable_CPD(CPD), return; end

%assert(approxeq(CPD.nsamples, sum(CPD.Wsum)));
% Guard against statistics that were never accumulated / went NaN upstream.
assert(~any(isnan(CPD.WXXsum)))
assert(~any(isnan(CPD.WXYsum)))
assert(~any(isnan(CPD.WYYsum)))

% self_size = dim of this node, cpsize = total dim of continuous parents,
% dpsize = number of discrete parent configurations (NOTE(review): inferred
% from how these sizes index the statistics below -- confirm against the
% gaussian_CPD constructor).
[self_size cpsize dpsize] = size(CPD.weights);

% Append 1s to the parents, and derive the corresponding cross products.
% This is used when estimating the means and weights simultaneously,
% and when estimating Sigma.
% Let x2 = [x 1]'
XY = zeros(cpsize+1, self_size, dpsize); % XY(:,:,i) = sum_l w(l,i) x2(l) y(l)'
XX = zeros(cpsize+1, cpsize+1, dpsize);  % XX(:,:,i) = sum_l w(l,i) x2(l) x2(l)'
YY = zeros(self_size, self_size, dpsize); % YY(:,:,i) = sum_l w(l,i) y(l) y(l)'
for i=1:dpsize
  XY(:,:,i) = [CPD.WXYsum(:,:,i) % X*Y
	       CPD.WYsum(:,i)']; % 1*Y
  % [x * [x' 1] = [xx' x
  %  1]            x' 1]
  XX(:,:,i) = [CPD.WXXsum(:,:,i) CPD.WXsum(:,i);
	       CPD.WXsum(:,i)'   CPD.Wsum(i)];
  YY(:,:,i) = CPD.WYYsum(:,:,i);
end

% w(i) = total expected count for discrete parent configuration i.
w = CPD.Wsum(:);
% Set any zeros to one before dividing.
% This is valid because w(i)=0 => WYsum(:,i)=0, etc, so 0/1 = 0.
w = w + (w==0);

if CPD.clamped_mean
  % Mean is fixed; only re-estimate the regression weights (if not clamped too).
  % Estimating B2 and then setting the last column (the mean) to the clamped mean is *not* equivalent
  % to estimating B and then adding the clamped_mean to the last column.
  if ~CPD.clamped_weights
    B = zeros(self_size, cpsize, dpsize);
    for i=1:dpsize
      if det(CPD.WXXsum(:,:,i))==0
	% Singular normal equations (e.g. no data for this configuration):
	% fall back to zero weights rather than dividing by a singular matrix.
	B(:,:,i) = 0;
      else
	% Eqn 9 in table 2 of TR
	%B(:,:,i) = CPD.WXYsum(:,:,i)' * inv(CPD.WXXsum(:,:,i));
	B(:,:,i) = (CPD.WXXsum(:,:,i) \ CPD.WXYsum(:,:,i))';
      end
    end
    %CPD.weights = reshape(B, [self_size cpsize dpsize]);
    CPD.weights = B;
  end
elseif CPD.clamped_weights % KPM 1/25/02
  if ~CPD.clamped_mean % ML estimate is just sample mean of the residuals y - W*x
    for i=1:dpsize
      CPD.mean(:,i) = (CPD.WYsum(:,i) - CPD.weights(:,:,i) * CPD.WXsum(:,i)) / w(i);
    end
  end
else % nothing is clamped, so estimate mean and weights simultaneously
  % Solve for the augmented matrix B2 = [W mu] in one regression on x2 = [x 1]'.
  B2 = zeros(self_size, cpsize+1, dpsize);
  for i=1:dpsize
    if det(XX(:,:,i))==0 % fix by U. Sondhauss 6/27/99
      B2(:,:,i)=0;
    else
      % Eqn 9 in table 2 of TR
      %B2(:,:,i) = XY(:,:,i)' * inv(XX(:,:,i));
      B2(:,:,i) = (XX(:,:,i) \ XY(:,:,i))';
    end
    CPD.mean(:,i) = B2(:,cpsize+1,i);
    CPD.weights(:,:,i) = B2(:,1:cpsize,i);
  end
end

% Rebuild B2 = [W mu] from the (possibly clamped) final params, so the
% covariance formulas below see a consistent mean/weights in all branches.
if cpsize>0
  B2(:,1:cpsize,:) = reshape(CPD.weights, [self_size cpsize dpsize]);
end
B2(:,cpsize+1,:) = reshape(CPD.mean, [self_size dpsize]);

% To avoid singular covariance matrices,
% we use the regularization method suggested in "A Quasi-Bayesian approach to estimating
% parameters for mixtures of normal distributions", Hamilton 91.
% If the ML estimate is Sigma = M/N, the MAP estimate is (M+gamma*I) / (N+gamma),
% where gamma >= 0 is a smoothing parameter (equivalent sample size of I prior)

gamma = CPD.cov_prior_weight;

if ~CPD.clamped_cov
  if CPD.cov_prior_entropic % eqn 12 of Brand AI/Stat 99
    Z = 1-temp;
    % When temp > 1, Z is negative, so we are dividing by a smaller
    % number, ie. increasing the variance.
  else
    Z = 0;
  end
  if CPD.tied_cov
    % All discrete parent configurations share one covariance matrix.
    S = zeros(self_size, self_size);
    % Eqn 2 from table 2 in TR
    for i=1:dpsize
      S = S + (YY(:,:,i) - B2(:,:,i)*XY(:,:,i));
    end
    %denom = max(1, CPD.nsamples + gamma + Z);
    denom = CPD.nsamples + gamma + Z;
    S = (S + gamma*eye(self_size)) / denom;
    if strcmp(CPD.cov_type, 'diag')
      S = diag(diag(S)); % zero out off-diagonal terms for diagonal covariance
    end
    CPD.cov = repmat(S, [1 1 dpsize]);
  else
    % Separate covariance per discrete parent configuration.
    for i=1:dpsize
      % Eqn 1 from table 2 in TR
      S = YY(:,:,i) - B2(:,:,i)*XY(:,:,i);
      %denom = max(1, w(i) + gamma + Z); % gives wrong answers on mhmm1
      denom = w(i) + gamma + Z;
      S = (S + gamma*eye(self_size)) / denom;
      CPD.cov(:,:,i) = S;
    end
    if strcmp(CPD.cov_type, 'diag')
      for i=1:dpsize
	CPD.cov(:,:,i) = diag(diag(CPD.cov(:,:,i)));
      end
    end
  end
end


% Optional safety net (disabled): reset near-singular covariances to their
% initial values to prevent a component collapsing onto a single point.
check_covars = 0;
min_covar = 1e-5;
if check_covars % prevent collapsing to a point
  for i=1:dpsize
    if min(svd(CPD.cov(:,:,i))) < min_covar
      disp(['resetting singular covariance for node ' num2str(CPD.self)]);
      CPD.cov(:,:,i) = CPD.init_cov(:,:,i);
    end
  end
end