annotate toolboxes/FullBNT-1.0.7/bnt/CPDs/@softmax_CPD/softmax_CPD.m @ 0:e9a9cd732c1e tip

first hg version after svn
author wolffd
date Tue, 10 Feb 2015 15:05:51 +0000
parents
children
rev   line source
function CPD = softmax_CPD(bnet, self, varargin)
% SOFTMAX_CPD Make a softmax (multinomial logit) CPD
%
% To define this CPD precisely, let W be an (m x n) matrix with W(i,:) = {i-th row of B}
% => we can define the following vectorial function:
%
% softmax: R^n |--> R^m
% softmax(z,i-th)=exp(W(i,:)*z)/sum_k(exp(W(k,:)*z))
%
% (this constructor augments z with a one at the beginning to introduce an offset term (=bias, intercept))
% Now call the continuous (cts) and always observed (obs) parents X,
% the discrete parents (if any) Q, and this node Y; then we use the discrete parent(s) just to index
% the parameter vectors (c.f., conditional Gaussian nodes); that is:
% prob(Y=i | X=x, Q=j) = softmax(x,i-th|j)
% where '|j' means that we are using the j-th (m x n) parameters matrix W(:,:,j).
% If there are no discrete parents, this is a regular softmax node.
% If Y is binary, this is a logistic (sigmoid) function.
%
% CPD = softmax_CPD(bnet, node_num, ...) will create a softmax CPD with random parameters,
% where node is the number of a node in this equivalence class.
%
% The following optional arguments can be specified in the form of name/value pairs:
% [default value in brackets]
% (Let ns(i) be the size of node i, X = ns(X), Y = ns(Y), Q1=ns(dps(1)), Q2=ns(dps(2)), ...
% where dps are the discrete parents; if there are no discrete parents, we set Q1=1.)
%
% discrete - the discrete parents that we want to treat like the cts ones [ [] ].
%            This can be used to define sigmoid belief networks - see the reference below.
%            For example suppose that Y has one cts parent X and two discrete ones: Q, C1 where:
%            -> Q is binary (1/2) and used just to index the parameters of 'self'
%            -> C1 is ternary (1/2/3) and treated as a cts node <=> its values appear in the linear
%               part of the softmax function
%            then:
%            prob(Y|X=x, Q=q, C1=c1)= softmax(W(:,:,q)' * y)
%            where y = [1 | delta(C1,1) delta(C1,2) delta(C1,3) | x(:)']' and delta(Y,a)=indicator(Y=a).
% weights  - (w(:,j,a,b,...) - w(:,j',a,b,...)) is ppn to dec. boundary
%            between j,j' given Q1=a,Q2=b,... [ randn(X,Y,Q1,Q2,...) ]
% offset   - (b(j,a,b,...) - b(j',a,b,...)) is the offset to dec. boundary
%            between j,j' given Q1=a,Q2=b,... [ randn(Y,Q1,Q2,...) ]
%
% e.g., CPD = softmax_CPD(bnet, i, 'offset', zeros(ns(i),1));
%
% The following fields control the behavior of the M step, which uses
% a weighted version of the Iteratively Reweighted Least Squares (WIRLS) if dps_as_cps=[]; or
% a weighted SCG otherwise, as implemented in Netlab, and modified by Pierpaolo Brutti.
%
% clamped     - 'yes' means don't adjust params during learning ['no']
% max_iter    - the maximum number of steps to take [10]
% verbose     - 'yes' means print the LL at each step of IRLS ['no']
% wthresh     - convergence threshold for weights [1e-2]
% llthresh    - convergence threshold for log likelihood [1e-2]
% approx_hess - 'yes' means approximate the Hessian for speed ['no']
%
% For backwards compatibility with BNT2, you can also specify the parameters in the following order
% softmax_CPD(bnet, self, w, b, clamped, max_iter, verbose, wthresh, llthresh, approx_hess)
%
% REFERENCE
% For details on the sigmoid belief nets, see:
% - Neal (1992). Connectionist learning of belief networks, Artificial Intelligence, 56, 71-113.
% - Saul, Jaakkola, Jordan (1996). Mean field theory for sigmoid belief networks, Journal of Artificial Intelligence Research (4), pp. 61-76.
%
% For details on the M step, see:
% - K. Chen, L. Xu, H. Chi (1999). Improved learning algorithms for mixtures of experts in multiclass
%   classification. Neural Networks 12, pp. 1229-1252.
% - M.I. Jordan, R.A. Jacobs (1994). Hierarchical Mixtures of Experts and the EM algorithm.
%   Neural Computation 6, pp. 181-214.
% - S.R. Waterhouse, A.J. Robinson (1994). Classification Using Hierarchical Mixtures of Experts. In Proc. IEEE
%   Workshop on Neural Network for Signal Processing IV, pp. 177-186

if nargin==0
  % This occurs if we are trying to load an object from a file.
  CPD = init_fields;
  CPD = class(CPD, 'softmax_CPD', discrete_CPD(0, []));
  return;
elseif isa(bnet, 'softmax_CPD')
  % This might occur if we are copying an object.
  CPD = bnet;
  return;
end
CPD = init_fields;

% A softmax node must be discrete; collect its cts/discrete parents.
assert(myismember(self, bnet.dnodes));
ns = bnet.node_sizes;
ps = parents(bnet.dag, self);
dps = myintersect(ps, bnet.dnodes);
cps = myintersect(ps, bnet.cnodes);

clamped = 0;
CPD = class(CPD, 'softmax_CPD', discrete_CPD(clamped, ns([ps self])));

dps_as_cpssz = 0;
dps_as_cps = [];
% determine if any discrete parents are to be treated as cts
% NOTE: use && (short-circuit) so varargin{1} is never touched when nargin < 3;
% the old non-short-circuit & would raise an index error for short arg lists.
if nargin >= 3 && ischar(varargin{1}) % might have passed in 'discrete'
  for i=1:2:length(varargin)
    if strcmp(varargin{i}, 'discrete')
      dps_as_cps = varargin{i+1};
      assert(myismember(dps_as_cps, dps));
      dps = mysetdiff(dps, dps_as_cps); % remove the dps treated as cts
      CPD.dps_as_cps.ndx = find_equiv_posns(dps_as_cps, ps);
      CPD.dps_as_cps.separator = [0 cumsum(ns(dps_as_cps(1:end-1)))]; % concatenated dps_as_cps dims separators
      dps_as_cpssz = sum(ns(dps_as_cps));
      break;
    end
  end
end
assert(~isempty(union(cps, dps_as_cps))); % there must be at least one cts or dps_as_cps parent
self_size = ns(self);
cpsz = sum(ns(cps));
glimsz = prod(ns(dps));
CPD.dpndx = find_equiv_posns(dps, ps); % it contains only the indices of the 'pure' dps
CPD.cpndx = find_equiv_posns(cps, ps);

CPD.self = self;
CPD.solo = (length(ns)<=2);
CPD.sizes = bnet.node_sizes([ps self]);

% set default params
CPD.max_iter = 10;
CPD.verbose = 0;
CPD.wthresh = 1e-2;
CPD.llthresh = 1e-2;
CPD.approx_hess = 0;
CPD.glim = cell(1,glimsz);
% one Netlab GLM (softmax output) per joint configuration of the 'pure' dps
for i=1:glimsz
  CPD.glim{i} = glm(dps_as_cpssz + cpsz, self_size, 'softmax');
end

if nargin >= 3
  args = varargin;
  nargs = length(args);
  if ~ischar(args{1})
    % softmax_CPD(bnet, self, w, b, clamped, max_iter, verbose, wthresh, llthresh, approx_hess)
    % Use && throughout so args{k} is only evaluated when it actually exists.
    if nargs >= 1 && ~isempty(args{1}), CPD = set_fields(CPD, 'weights', args{1}); end
    if nargs >= 2 && ~isempty(args{2}), CPD = set_fields(CPD, 'offset', args{2}); end
    if nargs >= 3 && ~isempty(args{3}), CPD = set_clamped(CPD, args{3}); end
    if nargs >= 4 && ~isempty(args{4}), CPD.max_iter = args{4}; end
    if nargs >= 5 && ~isempty(args{5}), CPD.verbose = args{5}; end
    if nargs >= 6 && ~isempty(args{6}), CPD.wthresh = args{6}; end
    if nargs >= 7 && ~isempty(args{7}), CPD.llthresh = args{7}; end
    if nargs >= 8 && ~isempty(args{8}), CPD.approx_hess = args{8}; end
  else
    CPD = set_fields(CPD, args{:});
  end
end

% sufficient statistics
% Since dsoftmax is not in the exponential family, we must store all the raw data.
CPD.parent_vals = []; % X(l,:) = value of cts parents in l'th example
CPD.self_vals = []; % Y(l,:) = value of self in l'th example

CPD.eso_weights=[]; % weights used by the WIRLS algorithm

% For BIC
CPD.nsamples = 0;
if ~adjustable_CPD(CPD),
  CPD.nparams=0;
else
  [W, b] = extract_params(CPD);
  CPD.nparams = numel(W) + numel(b);
end
wolffd@0 162
wolffd@0 163 %%%%%%%%%%%
wolffd@0 164
function CPD = init_fields()
% INIT_FIELDS Initialize every field of the softmax_CPD struct to an empty value.
% This ensures we define the fields in the same order
% no matter whether we load an object from a file,
% or create it from scratch. (Matlab requires this.)
% NOTE: do not reorder these assignments - MATLAB old-style classes require
% identical field order between a freshly built object and one loaded from disk.

CPD.glim = {};                 % cell array of Netlab GLMs, one per discrete-parent configuration
CPD.self = [];                 % node number of this node in the bnet
CPD.solo = [];                 % true if the network has at most 2 nodes (set from length(ns) in the constructor)
CPD.max_iter = [];             % max number of M-step (IRLS/SCG) iterations
CPD.verbose = [];              % print LL at each IRLS step if nonzero
CPD.wthresh = [];              % convergence threshold on weights
CPD.llthresh = [];             % convergence threshold on log likelihood
CPD.approx_hess = [];          % approximate the Hessian for speed if nonzero
CPD.sizes = [];                % node sizes of [parents self]
CPD.parent_vals = [];          % raw data: X(l,:) = cts-parent values in l'th example
CPD.eso_weights=[];            % per-example weights used by the WIRLS algorithm
CPD.self_vals = [];            % raw data: Y(l,:) = value of self in l'th example
CPD.nsamples = [];             % number of accumulated examples (for BIC)
CPD.nparams = [];              % number of free parameters (for BIC)
CPD.dpndx = [];                % positions in ps of the 'pure' discrete parents
CPD.cpndx = [];                % positions in ps of the cts parents
CPD.dps_as_cps.ndx = [];       % positions in ps of discrete parents treated as cts
CPD.dps_as_cps.separator = []; % cumulative-size separators for concatenated dps_as_cps dims