To check out this repository, run `hg clone` with the following URL, or open the URL using EasyMercurial or your preferred Mercurial client.

Statistics Download as Zip
| Branch: | Revision:

root / _FullBNT / BNT / CPDs / @gaussian_CPD / Old / maximize_params.m @ 8:b5b38998ef3b

History | View | Annotate | Download (4.76 KB)

1
function CPD = maximize_params(CPD, temp)
% MAXIMIZE_PARAMS Set the params of a CPD to their ML values (Gaussian)
% CPD = maximize_params(CPD, temperature)
%
% The temperature argument is currently only used for the entropic prior on Sigma.
%
% For details, see "Fitting a Conditional Gaussian Distribution", Kevin Murphy,
% tech. report, 1998, available at www.cs.berkeley.edu/~murphyk/papers.html
% Referring to table 2 of that report: equations 1/2 estimate the covariance
% matrix in the untied/tied case, and equation 9 estimates the weight matrix
% and mean simultaneously.
% We do not implement spherical Gaussians - the code is already pretty complicated!

if ~adjustable_CPD(CPD), return; end

%assert(approxeq(CPD.nsamples, sum(CPD.Wsum)));
% The accumulated sufficient statistics must be NaN-free before we divide by them.
assert(~any(isnan(CPD.WXXsum)))
assert(~any(isnan(CPD.WXYsum)))
assert(~any(isnan(CPD.WYYsum)))

[self_size cpsize dpsize] = size(CPD.weights);

% Append a constant 1 to the continuous parents, let x2 = [x 1]', and build
% the corresponding weighted cross products. These are used when estimating
% the mean and weights simultaneously, and when estimating Sigma.
XY = zeros(cpsize+1, self_size, dpsize); % XY(:,:,j) = sum_l w(l,j) x2(l) y(l)'
XX = zeros(cpsize+1, cpsize+1, dpsize);  % XX(:,:,j) = sum_l w(l,j) x2(l) x2(l)'
YY = zeros(self_size, self_size, dpsize);% YY(:,:,j) = sum_l w(l,j) y(l) y(l)'
for j=1:dpsize
  XY(:,:,j) = [CPD.WXYsum(:,:,j) % X*Y
               CPD.WYsum(:,j)']; % 1*Y
  % [x  * [x' 1]  = [xx' x
  %  1]              x'  1]
  XX(:,:,j) = [CPD.WXXsum(:,:,j) CPD.WXsum(:,j);
               CPD.WXsum(:,j)'   CPD.Wsum(j)];
  YY(:,:,j) = CPD.WYYsum(:,:,j);
end

wtot = CPD.Wsum(:);
% Replace any zero counts with one before dividing.
% This is valid because wtot(j)=0 => WYsum(:,j)=0, etc.
wtot = wtot + (wtot==0);

if CPD.clamped_mean
  % Estimating [W mu] jointly and then overwriting the last column (the mean)
  % with the clamped mean is *not* equivalent to estimating W alone and then
  % appending the clamped mean as the last column.
  if ~CPD.clamped_weights
    W = zeros(self_size, cpsize, dpsize);
    for j=1:dpsize
      if det(CPD.WXXsum(:,:,j)) ~= 0
        % Eqn 9 in table 2 of TR
        %W(:,:,j) = CPD.WXYsum(:,:,j)' * inv(CPD.WXXsum(:,:,j));
        W(:,:,j) = (CPD.WXXsum(:,:,j) \ CPD.WXYsum(:,:,j))';
      else
        W(:,:,j) = 0;
      end
    end
    CPD.weights = W;
  end
elseif CPD.clamped_weights % KPM 1/25/02
  if ~CPD.clamped_mean % ML estimate is just the sample mean of the residuals
    for j=1:dpsize
      CPD.mean(:,j) = (CPD.WYsum(:,j) - CPD.weights(:,:,j) * CPD.WXsum(:,j)) / wtot(j);
    end
  end
else % nothing is clamped, so estimate mean and weights simultaneously
  WM = zeros(self_size, cpsize+1, dpsize);
  for j=1:dpsize
    if det(XX(:,:,j)) ~= 0
      % Eqn 9 in table 2 of TR
      %WM(:,:,j) = XY(:,:,j)' * inv(XX(:,:,j));
      WM(:,:,j) = (XX(:,:,j) \ XY(:,:,j))';
    else % fix by U. Sondhauss 6/27/99
      WM(:,:,j) = 0;
    end
    CPD.mean(:,j) = WM(:,cpsize+1,j);
    CPD.weights(:,:,j) = WM(:,1:cpsize,j);
  end
end

% (Re)build WM = [W mu] from the final weights/mean, whichever branch ran above.
if cpsize>0
  WM(:,1:cpsize,:) = reshape(CPD.weights, [self_size cpsize dpsize]);
end
WM(:,cpsize+1,:) = reshape(CPD.mean, [self_size dpsize]);

% To avoid singular covariance matrices we use the regularization method
% suggested in "A Quasi-Bayesian approach to estimating parameters for
% mixtures of normal distributions", Hamilton 91.
% If the ML estimate is Sigma = M/N, the MAP estimate is (M+gamma*I) / (N+gamma),
% where gamma >= 0 is a smoothing parameter (equivalent sample size of I prior).
gamma = CPD.cov_prior_weight;

if ~CPD.clamped_cov
  if CPD.cov_prior_entropic % eqn 12 of Brand AI/Stat 99
    % When temp > 1, zterm is negative, so we are dividing by a smaller
    % number, i.e. increasing the variance.
    zterm = 1-temp;
  else
    zterm = 0;
  end
  if CPD.tied_cov
    % Eqn 2 from table 2 in TR: pool the statistics over all discrete parents.
    C = zeros(self_size, self_size);
    for j=1:dpsize
      C = C + (YY(:,:,j) - WM(:,:,j)*XY(:,:,j));
    end
    %denom = max(1, CPD.nsamples + gamma + zterm);
    denom = CPD.nsamples + gamma + zterm;
    C = (C + gamma*eye(self_size)) / denom;
    if strcmp(CPD.cov_type, 'diag')
      C = diag(diag(C));
    end
    CPD.cov = repmat(C, [1 1 dpsize]);
  else
    for j=1:dpsize
      % Eqn 1 from table 2 in TR
      C = YY(:,:,j) - WM(:,:,j)*XY(:,:,j);
      %denom = max(1, wtot(j) + gamma + zterm); % gives wrong answers on mhmm1
      denom = wtot(j) + gamma + zterm;
      CPD.cov(:,:,j) = (C + gamma*eye(self_size)) / denom;
    end
    if strcmp(CPD.cov_type, 'diag')
      for j=1:dpsize
        CPD.cov(:,:,j) = diag(diag(CPD.cov(:,:,j)));
      end
    end
  end
end

check_covars = 0;
min_covar = 1e-5;
if check_covars % prevent collapsing to a point
  for j=1:dpsize
    if min(svd(CPD.cov(:,:,j))) < min_covar
      disp(['resetting singular covariance for node ' num2str(CPD.self)]);
      CPD.cov(:,:,j) = CPD.init_cov(:,:,j);
    end
  end
end
145

    
146

    
147